code (string, 75–104k) | docstring (string, 1–46.9k) | text (string, 164–112k) |
---|---|---|
def removeRedundantVerbChains( foundChains, removeOverlapping = True, removeSingleAraAndEi = False ):
''' Removes redundant verb chains: chains that partially or fully overlap
other chains (removeOverlapping == True), and single-word 'ei' and 'ära' chains
(if removeSingleAraAndEi == True);
In general there should not be many overlapping chains, because when expanding
phrases we always try to check that the expansion does not overlap any existing
phrase;
Overlaps mainly arise when wrong finite verb analyses have slipped into the
morphological analysis (or the analyses have remained ambiguous), so more
finite verbs are detected in the clause than necessary.
Heuristic: of two overlapping chains we keep the phrase that starts earlier and
set its OTHER_VERBS value to True, marking that there is some confusion with
other verbs in the context.
'''
toDelete = []
for i in range(len(foundChains)):
matchObj1 = foundChains[i]
if removeOverlapping:
for j in range(i+1, len(foundChains)):
matchObj2 = foundChains[j]
if matchObj1 != matchObj2 and matchObj1[CLAUSE_IDX] == matchObj2[CLAUSE_IDX]:
phrase1 = set(matchObj1[PHRASE])
phrase2 = set(matchObj2[PHRASE])
intersect = phrase1.intersection(phrase2)
if len(intersect) > 0:
# In general there should be no overlaps, because when expanding phrases we
# always try to check that the expansion does not overlap any existing phrase;
# Overlaps mainly arise when ambiguities have remained in the finite verb
# analyses of the morphological analysis (or wrong analyses have slipped in),
# so more finite verbs are detected in the clause than necessary.
# Heuristic: we keep the phrase that starts earlier and append a question
# mark to it (since it is not certain that everything is in order)
minWid1 = min(matchObj1[PHRASE])
minWid2 = min(matchObj2[PHRASE])
if minWid1 < minWid2:
matchObj1[OTHER_VERBS] = True
toDelete.append(j)
else:
matchObj2[OTHER_VERBS] = True
toDelete.append(i)
if removeSingleAraAndEi:
if ( len(matchObj1[PATTERN])==1 and re.match('^(ei|ära)$', matchObj1[PATTERN][0]) ):
toDelete.append(i)
if toDelete:
if len(set(toDelete)) != len(toDelete):
toDelete = list(set(toDelete)) # Remove duplicates
toDelete = [ foundChains[i] for i in toDelete ]
for verbObj in toDelete:
foundChains.remove(verbObj) | Removes redundant verb chains: chains that partially or fully overlap
other chains (removeOverlapping == True), and single-word 'ei' and 'ära' chains
(if removeSingleAraAndEi == True);
In general there should not be many overlapping chains, because when expanding
phrases we always try to check that the expansion does not overlap any existing
phrase;
Overlaps mainly arise when wrong finite verb analyses have slipped into the
morphological analysis (or the analyses have remained ambiguous), so more
finite verbs are detected in the clause than necessary.
Heuristic: of two overlapping chains we keep the phrase that starts earlier and
set its OTHER_VERBS value to True, marking that there is some confusion with
other verbs in the context. | Below is the instruction that describes the task:
### Input:
Removes redundant verb chains: chains that partially or fully overlap
other chains (removeOverlapping == True), and single-word 'ei' and 'ära' chains
(if removeSingleAraAndEi == True);
In general there should not be many overlapping chains, because when expanding
phrases we always try to check that the expansion does not overlap any existing
phrase;
Overlaps mainly arise when wrong finite verb analyses have slipped into the
morphological analysis (or the analyses have remained ambiguous), so more
finite verbs are detected in the clause than necessary.
Heuristic: of two overlapping chains we keep the phrase that starts earlier and
set its OTHER_VERBS value to True, marking that there is some confusion with
other verbs in the context.
### Response:
def removeRedundantVerbChains( foundChains, removeOverlapping = True, removeSingleAraAndEi = False ):
''' Removes redundant verb chains: chains that partially or fully overlap
other chains (removeOverlapping == True), and single-word 'ei' and 'ära' chains
(if removeSingleAraAndEi == True);
In general there should not be many overlapping chains, because when expanding
phrases we always try to check that the expansion does not overlap any existing
phrase;
Overlaps mainly arise when wrong finite verb analyses have slipped into the
morphological analysis (or the analyses have remained ambiguous), so more
finite verbs are detected in the clause than necessary.
Heuristic: of two overlapping chains we keep the phrase that starts earlier and
set its OTHER_VERBS value to True, marking that there is some confusion with
other verbs in the context.
'''
toDelete = []
for i in range(len(foundChains)):
matchObj1 = foundChains[i]
if removeOverlapping:
for j in range(i+1, len(foundChains)):
matchObj2 = foundChains[j]
if matchObj1 != matchObj2 and matchObj1[CLAUSE_IDX] == matchObj2[CLAUSE_IDX]:
phrase1 = set(matchObj1[PHRASE])
phrase2 = set(matchObj2[PHRASE])
intersect = phrase1.intersection(phrase2)
if len(intersect) > 0:
# In general there should be no overlaps, because when expanding phrases we
# always try to check that the expansion does not overlap any existing phrase;
# Overlaps mainly arise when ambiguities have remained in the finite verb
# analyses of the morphological analysis (or wrong analyses have slipped in),
# so more finite verbs are detected in the clause than necessary.
# Heuristic: we keep the phrase that starts earlier and append a question
# mark to it (since it is not certain that everything is in order)
minWid1 = min(matchObj1[PHRASE])
minWid2 = min(matchObj2[PHRASE])
if minWid1 < minWid2:
matchObj1[OTHER_VERBS] = True
toDelete.append(j)
else:
matchObj2[OTHER_VERBS] = True
toDelete.append(i)
if removeSingleAraAndEi:
if ( len(matchObj1[PATTERN])==1 and re.match('^(ei|ära)$', matchObj1[PATTERN][0]) ):
toDelete.append(i)
if toDelete:
if len(set(toDelete)) != len(toDelete):
toDelete = list(set(toDelete)) # Remove duplicates
toDelete = [ foundChains[i] for i in toDelete ]
for verbObj in toDelete:
foundChains.remove(verbObj) |
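As a rough illustration of the overlap heuristic described above, here is a hypothetical call-site sketch. It assumes `removeRedundantVerbChains`, the `re` module, and the index constants `CLAUSE_IDX`, `PHRASE`, `PATTERN` and `OTHER_VERBS` can be imported from the same module as the function; the toy chains below are invented for this example.

```python
# Toy verb chains, keyed by the module's own index constants (assumed imported).
foundChains = [
    {CLAUSE_IDX: 0, PHRASE: [2, 3], PATTERN: ['ole', 'verb'], OTHER_VERBS: False},
    {CLAUSE_IDX: 0, PHRASE: [3, 4], PATTERN: ['verb'],        OTHER_VERBS: False},  # overlaps word 3
    {CLAUSE_IDX: 1, PHRASE: [7],    PATTERN: ['ei'],          OTHER_VERBS: False},  # single 'ei' chain
]

removeRedundantVerbChains(foundChains, removeOverlapping=True, removeSingleAraAndEi=True)

# Expected outcome: only the chain starting at word 2 survives, with OTHER_VERBS
# set to True; the overlapping chain and the single 'ei' chain are removed.
print(foundChains)
```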
def sum(self):
"""
Compute the sum across records.
"""
return self._constructor(self.values.sum(axis=self.baseaxes, keepdims=True)) | Compute the sum across records. | Below is the instruction that describes the task:
### Input:
Compute the sum across records.
### Response:
def sum(self):
"""
Compute the sum across records.
"""
return self._constructor(self.values.sum(axis=self.baseaxes, keepdims=True)) |
def query(
self, queryEngine, query_str, vendorSpecific=None, do_post=False, **kwargs
):
"""See Also: queryResponse()
Args:
queryEngine:
query_str:
vendorSpecific:
do_post:
**kwargs:
Returns:
"""
response = self.queryResponse(
queryEngine, query_str, vendorSpecific, do_post, **kwargs
)
if self._content_type_is_json(response):
return self._read_json_response(response)
else:
return self._read_stream_response(response) | See Also: queryResponse()
Args:
queryEngine:
query_str:
vendorSpecific:
do_post:
**kwargs:
Returns: | Below is the instruction that describes the task:
### Input:
See Also: queryResponse()
Args:
queryEngine:
query_str:
vendorSpecific:
do_post:
**kwargs:
Returns:
### Response:
def query(
self, queryEngine, query_str, vendorSpecific=None, do_post=False, **kwargs
):
"""See Also: queryResponse()
Args:
queryEngine:
query_str:
vendorSpecific:
do_post:
**kwargs:
Returns:
"""
response = self.queryResponse(
queryEngine, query_str, vendorSpecific, do_post, **kwargs
)
if self._content_type_is_json(response):
return self._read_json_response(response)
else:
return self._read_stream_response(response) |
def passageLoop(parent, new_tree, xpath1, xpath2=None, preceding_siblings=False, following_siblings=False):
""" Loop over passages to construct and increment new tree given a parent and XPaths
:param parent: Parent on which to perform xpath
:param new_tree: Parent on which to add nodes
:param xpath1: List of xpath elements
:type xpath1: [str]
:param xpath2: List of xpath elements
:type xpath2: [str]
:param preceding_siblings: Append preceding siblings of XPath 1/2 match to the tree
:param following_siblings: Append following siblings of XPath 1/2 match to the tree
:return: Newly incremented tree
"""
current_1, queue_1 = __formatXpath__(xpath1)
if xpath2 is None: # In case we need what is following or preceding our node
result_1, loop = performXpath(parent, current_1)
if loop is True:
queue_1 = xpath1
central = None
has_no_queue = len(queue_1) == 0
# For each sibling, when we need them in the context of a range
if preceding_siblings or following_siblings:
for sibling in xmliter(parent):
if sibling == result_1:
central = True
# We copy the node we looked for (Result_1)
child = copyNode(result_1, children=has_no_queue, parent=new_tree)
# if we don't have children
# we loop over the passage child
if not has_no_queue:
passageLoop(
result_1,
child,
queue_1,
None,
preceding_siblings=preceding_siblings,
following_siblings=following_siblings
)
# If we were waiting for preceding_siblings, we break it off
# As we don't need to go further
if preceding_siblings:
break
elif not central and preceding_siblings:
copyNode(sibling, parent=new_tree, children=True)
elif central and following_siblings:
copyNode(sibling, parent=new_tree, children=True)
else:
result_1, loop = performXpath(parent, current_1)
if loop is True:
queue_1 = xpath1
if xpath2 == xpath1:
current_2, queue_2 = current_1, queue_1
else:
current_2, queue_2 = __formatXpath__(xpath2)
else:
current_2, queue_2 = __formatXpath__(xpath2)
if xpath1 != xpath2:
result_2, loop = performXpath(parent, current_2)
if loop is True:
queue_2 = xpath2
else:
result_2 = result_1
if result_1 == result_2:
has_no_queue = len(queue_1) == 0
child = copyNode(result_1, children=has_no_queue, parent=new_tree)
if not has_no_queue:
passageLoop(
result_1,
child,
queue_1,
queue_2
)
else:
start = False
# For each sibling
for sibling in xmliter(parent):
# If we have found start
# We copy the node because we are between start and end
if start:
# If we are at the end
# We break the copy
if sibling == result_2:
break
else:
copyNode(sibling, parent=new_tree, children=True)
# If this is start
# Then we copy it and initiate start
elif sibling == result_1:
start = True
has_no_queue_1 = len(queue_1) == 0
node = copyNode(sibling, children=has_no_queue_1, parent=new_tree)
if not has_no_queue_1:
passageLoop(sibling, node, queue_1, None, following_siblings=True)
continue_loop = len(queue_2) == 0
node = copyNode(result_2, children=continue_loop, parent=new_tree)
if not continue_loop:
passageLoop(result_2, node, queue_2, None, preceding_siblings=True)
return new_tree | Loop over passages to construct and increment new tree given a parent and XPaths
:param parent: Parent on which to perform xpath
:param new_tree: Parent on which to add nodes
:param xpath1: List of xpath elements
:type xpath1: [str]
:param xpath2: List of xpath elements
:type xpath2: [str]
:param preceding_siblings: Append preceding siblings of XPath 1/2 match to the tree
:param following_siblings: Append following siblings of XPath 1/2 match to the tree
:return: Newly incremented tree | Below is the instruction that describes the task:
### Input:
Loop over passages to construct and increment new tree given a parent and XPaths
:param parent: Parent on which to perform xpath
:param new_tree: Parent on which to add nodes
:param xpath1: List of xpath elements
:type xpath1: [str]
:param xpath2: List of xpath elements
:type xpath2: [str]
:param preceding_siblings: Append preceding siblings of XPath 1/2 match to the tree
:param following_siblings: Append following siblings of XPath 1/2 match to the tree
:return: Newly incremented tree
### Response:
def passageLoop(parent, new_tree, xpath1, xpath2=None, preceding_siblings=False, following_siblings=False):
""" Loop over passages to construct and increment new tree given a parent and XPaths
:param parent: Parent on which to perform xpath
:param new_tree: Parent on which to add nodes
:param xpath1: List of xpath elements
:type xpath1: [str]
:param xpath2: List of xpath elements
:type xpath2: [str]
:param preceding_siblings: Append preceding siblings of XPath 1/2 match to the tree
:param following_siblings: Append following siblings of XPath 1/2 match to the tree
:return: Newly incremented tree
"""
current_1, queue_1 = __formatXpath__(xpath1)
if xpath2 is None: # In case we need what is following or preceding our node
result_1, loop = performXpath(parent, current_1)
if loop is True:
queue_1 = xpath1
central = None
has_no_queue = len(queue_1) == 0
# For each sibling, when we need them in the context of a range
if preceding_siblings or following_siblings:
for sibling in xmliter(parent):
if sibling == result_1:
central = True
# We copy the node we looked for (Result_1)
child = copyNode(result_1, children=has_no_queue, parent=new_tree)
# if we don't have children
# we loop over the passage child
if not has_no_queue:
passageLoop(
result_1,
child,
queue_1,
None,
preceding_siblings=preceding_siblings,
following_siblings=following_siblings
)
# If we were waiting for preceding_siblings, we break it off
# As we don't need to go further
if preceding_siblings:
break
elif not central and preceding_siblings:
copyNode(sibling, parent=new_tree, children=True)
elif central and following_siblings:
copyNode(sibling, parent=new_tree, children=True)
else:
result_1, loop = performXpath(parent, current_1)
if loop is True:
queue_1 = xpath1
if xpath2 == xpath1:
current_2, queue_2 = current_1, queue_1
else:
current_2, queue_2 = __formatXpath__(xpath2)
else:
current_2, queue_2 = __formatXpath__(xpath2)
if xpath1 != xpath2:
result_2, loop = performXpath(parent, current_2)
if loop is True:
queue_2 = xpath2
else:
result_2 = result_1
if result_1 == result_2:
has_no_queue = len(queue_1) == 0
child = copyNode(result_1, children=has_no_queue, parent=new_tree)
if not has_no_queue:
passageLoop(
result_1,
child,
queue_1,
queue_2
)
else:
start = False
# For each sibling
for sibling in xmliter(parent):
# If we have found start
# We copy the node because we are between start and end
if start:
# If we are at the end
# We break the copy
if sibling == result_2:
break
else:
copyNode(sibling, parent=new_tree, children=True)
# If this is start
# Then we copy it and initiate start
elif sibling == result_1:
start = True
has_no_queue_1 = len(queue_1) == 0
node = copyNode(sibling, children=has_no_queue_1, parent=new_tree)
if not has_no_queue_1:
passageLoop(sibling, node, queue_1, None, following_siblings=True)
continue_loop = len(queue_2) == 0
node = copyNode(result_2, children=continue_loop, parent=new_tree)
if not continue_loop:
passageLoop(result_2, node, queue_2, None, preceding_siblings=True)
return new_tree |
def _map(self, event):
"""Extract elements from an operation event and map to a named event."""
description = event.get('description', '')
start_time = google_base.parse_rfc3339_utc_string(
event.get('timestamp', ''))
for name, regex in _EVENT_REGEX_MAP.items():
match = regex.match(description)
if match:
return {'name': name, 'start-time': start_time}, match
return {'name': description, 'start-time': start_time}, None | Extract elements from an operation event and map to a named event. | Below is the instruction that describes the task:
### Input:
Extract elements from an operation event and map to a named event.
### Response:
def _map(self, event):
"""Extract elements from an operation event and map to a named event."""
description = event.get('description', '')
start_time = google_base.parse_rfc3339_utc_string(
event.get('timestamp', ''))
for name, regex in _EVENT_REGEX_MAP.items():
match = regex.match(description)
if match:
return {'name': name, 'start-time': start_time}, match
return {'name': description, 'start-time': start_time}, None |
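For context, the loop above relies on a module-level `_EVENT_REGEX_MAP` that is not shown in this row. The following self-contained sketch reproduces the same name-mapping pattern with invented event names and regexes; nothing here is taken from the real map.

```python
import re

# Hypothetical regex map in the same shape as _EVENT_REGEX_MAP (entries invented).
_EVENT_REGEX_MAP = {
    'pulling-image': re.compile(r'^Started pulling "(.+)"'),
    'localizing-files': re.compile(r'^Started copying files'),
}

description = 'Started pulling "gcr.io/example/worker:1.0"'
for name, regex in _EVENT_REGEX_MAP.items():
    match = regex.match(description)
    if match:
        print(name, match.groups())  # pulling-image ('gcr.io/example/worker:1.0',)
        break
else:
    print(description)               # unmatched descriptions fall through unchanged
```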
def open(self, dbname=None):
'''
Open database from the path with the name or latest.
If there are no databases yet, create a new one implicitly.
:return:
'''
databases = self.list()
if self.is_closed():
self.db_path = os.path.join(self.path, dbname or (databases and databases[0] or self.new()))
if not self._opened:
self.list_tables()
self._opened = True | Open database from the path with the name or latest.
If there are no databases yet, create a new one implicitly.
:return: | Below is the instruction that describes the task:
### Input:
Open database from the path with the name or latest.
If there are no databases yet, create a new one implicitly.
:return:
### Response:
def open(self, dbname=None):
'''
Open database from the path with the name or latest.
If there are no databases yet, create a new one implicitly.
:return:
'''
databases = self.list()
if self.is_closed():
self.db_path = os.path.join(self.path, dbname or (databases and databases[0] or self.new()))
if not self._opened:
self.list_tables()
self._opened = True |
def main():
"""
Entry point
"""
parser = ArgumentParser()
parser.add_argument("direction",
choices=(
"up", "down", "left", "right",
"next", "prev"
),
help="Direction to put the focus on")
args = parser.parse_args()
tree = i3Tree()
con = None
if args.direction in ("next", "prev"):
con = cycle_outputs(tree, args.direction)
else:
con = cycle_windows(tree, args.direction)
if con:
i3.focus(con_id=con.id) | Entry point | Below is the instruction that describes the task:
### Input:
Entry point
### Response:
def main():
"""
Entry point
"""
parser = ArgumentParser()
parser.add_argument("direction",
choices=(
"up", "down", "left", "right",
"next", "prev"
),
help="Direction to put the focus on")
args = parser.parse_args()
tree = i3Tree()
con = None
if args.direction in ("next", "prev"):
con = cycle_outputs(tree, args.direction)
else:
con = cycle_windows(tree, args.direction)
if con:
i3.focus(con_id=con.id) |
def start(self):
"""
Start executor with additional checks.
Checks if previous executor isn't running then start process
(executor) and wait until it's started.
:returns: itself
:rtype: Executor
"""
if self.pre_start_check():
# Some other executor (or process) is running with same config:
raise AlreadyRunning(self)
super(Executor, self).start()
self.wait_for(self.check_subprocess)
return self | Start executor with additional checks.
Checks if previous executor isn't running then start process
(executor) and wait until it's started.
:returns: itself
:rtype: Executor | Below is the instruction that describes the task:
### Input:
Start executor with additional checks.
Checks if previous executor isn't running then start process
(executor) and wait until it's started.
:returns: itself
:rtype: Executor
### Response:
def start(self):
"""
Start executor with additional checks.
Checks if previous executor isn't running then start process
(executor) and wait until it's started.
:returns: itself
:rtype: Executor
"""
if self.pre_start_check():
# Some other executor (or process) is running with same config:
raise AlreadyRunning(self)
super(Executor, self).start()
self.wait_for(self.check_subprocess)
return self |
def process_delivery(message, notification):
"""Function to process a delivery notification"""
mail = message['mail']
delivery = message['delivery']
if 'timestamp' in delivery:
delivered_datetime = clean_time(delivery['timestamp'])
else:
delivered_datetime = None
deliveries = []
for eachrecipient in delivery['recipients']:
# Create each delivery
deliveries += [Delivery.objects.create(
sns_topic=notification['TopicArn'],
sns_messageid=notification['MessageId'],
mail_timestamp=clean_time(mail['timestamp']),
mail_id=mail['messageId'],
mail_from=mail['source'],
address=eachrecipient,
# delivery
delivered_time=delivered_datetime,
processing_time=int(delivery['processingTimeMillis']),
smtp_response=delivery['smtpResponse']
)]
# Send signals for each delivery.
for eachdelivery in deliveries:
signals.feedback.send(
sender=Delivery,
instance=eachdelivery,
message=message,
notification=notification
)
logger.info('Logged %s Deliveries(s)', str(len(deliveries)))
return HttpResponse('Delivery Processed') | Function to process a delivery notification | Below is the instruction that describes the task:
### Input:
Function to process a delivery notification
### Response:
def process_delivery(message, notification):
"""Function to process a delivery notification"""
mail = message['mail']
delivery = message['delivery']
if 'timestamp' in delivery:
delivered_datetime = clean_time(delivery['timestamp'])
else:
delivered_datetime = None
deliveries = []
for eachrecipient in delivery['recipients']:
# Create each delivery
deliveries += [Delivery.objects.create(
sns_topic=notification['TopicArn'],
sns_messageid=notification['MessageId'],
mail_timestamp=clean_time(mail['timestamp']),
mail_id=mail['messageId'],
mail_from=mail['source'],
address=eachrecipient,
# delivery
delivered_time=delivered_datetime,
processing_time=int(delivery['processingTimeMillis']),
smtp_response=delivery['smtpResponse']
)]
# Send signals for each delivery.
for eachdelivery in deliveries:
signals.feedback.send(
sender=Delivery,
instance=eachdelivery,
message=message,
notification=notification
)
logger.info('Logged %s Deliveries(s)', str(len(deliveries)))
return HttpResponse('Delivery Processed') |
def check(self):
"""
Compare the :func:`os.stat` for the pam_env style environment file
`path` with the previous result `old_st`, which may be :data:`None` if
the previous stat attempt failed. Reload its contents if the file has
changed or appeared since last attempt.
:returns:
New :func:`os.stat` result. The new call to :func:`reload_env` should
pass it as the value of `old_st`.
"""
st = self._stat()
if self._st == st:
return
self._st = st
self._remove_existing()
if st is None:
LOG.debug('%r: file has disappeared', self)
else:
self._on_file_changed() | Compare the :func:`os.stat` for the pam_env style environment file
`path` with the previous result `old_st`, which may be :data:`None` if
the previous stat attempt failed. Reload its contents if the file has
changed or appeared since last attempt.
:returns:
New :func:`os.stat` result. The new call to :func:`reload_env` should
pass it as the value of `old_st`. | Below is the instruction that describes the task:
### Input:
Compare the :func:`os.stat` for the pam_env style environment file
`path` with the previous result `old_st`, which may be :data:`None` if
the previous stat attempt failed. Reload its contents if the file has
changed or appeared since last attempt.
:returns:
New :func:`os.stat` result. The new call to :func:`reload_env` should
pass it as the value of `old_st`.
### Response:
def check(self):
"""
Compare the :func:`os.stat` for the pam_env style environment file
`path` with the previous result `old_st`, which may be :data:`None` if
the previous stat attempt failed. Reload its contents if the file has
changed or appeared since last attempt.
:returns:
New :func:`os.stat` result. The new call to :func:`reload_env` should
pass it as the value of `old_st`.
"""
st = self._stat()
if self._st == st:
return
self._st = st
self._remove_existing()
if st is None:
LOG.debug('%r: file has disappeared', self)
else:
self._on_file_changed() |
def delete(self):
"""Delete the draft."""
response = self.session.request("delete:Message", [ self.message_id ])
self.data = response
return self | Delete the draft. | Below is the instruction that describes the task:
### Input:
Delete the draft.
### Response:
def delete(self):
"""Delete the draft."""
response = self.session.request("delete:Message", [ self.message_id ])
self.data = response
return self |
def mongodb(line):
'''
>>> import pprint
>>> input_line1 = '2017-08-17T07:56:33.489+0200 I REPL [signalProcessingThread] shutting down replication subsystems'
>>> output_line1 = mongodb(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'component': 'REPL',
'context': '[signalProcessingThread]',
'message': 'shutting down replication subsystems',
'severity': 'I',
'timestamp': '2017-08-17T07:56:33.489+0200'},
'timestamp': '2017-08-17T07:56:33.489+0200',
'type': 'log'}
>>> input_line2 = '2017-08-17T07:56:33.515+0200 W NETWORK [initandlisten] No primary detected for set confsvr_repl1'
>>> output_line2 = mongodb(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'component': 'NETWORK',
'context': '[initandlisten]',
'message': 'No primary detected for set confsvr_repl1',
'severity': 'W',
'timestamp': '2017-08-17T07:56:33.515+0200'},
'timestamp': '2017-08-17T07:56:33.515+0200',
'type': 'log'}
'''
keys = ['timestamp', 'severity', 'component', 'context', 'message']
values = re.split(r'\s+', line, maxsplit=4)
mongodb_log = dict(zip(keys,values))
return dict(
timestamp=values[0],
data=mongodb_log,
type='log',
) | >>> import pprint
>>> input_line1 = '2017-08-17T07:56:33.489+0200 I REPL [signalProcessingThread] shutting down replication subsystems'
>>> output_line1 = mongodb(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'component': 'REPL',
'context': '[signalProcessingThread]',
'message': 'shutting down replication subsystems',
'severity': 'I',
'timestamp': '2017-08-17T07:56:33.489+0200'},
'timestamp': '2017-08-17T07:56:33.489+0200',
'type': 'log'}
>>> input_line2 = '2017-08-17T07:56:33.515+0200 W NETWORK [initandlisten] No primary detected for set confsvr_repl1'
>>> output_line2 = mongodb(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'component': 'NETWORK',
'context': '[initandlisten]',
'message': 'No primary detected for set confsvr_repl1',
'severity': 'W',
'timestamp': '2017-08-17T07:56:33.515+0200'},
'timestamp': '2017-08-17T07:56:33.515+0200',
'type': 'log'} | Below is the instruction that describes the task:
### Input:
>>> import pprint
>>> input_line1 = '2017-08-17T07:56:33.489+0200 I REPL [signalProcessingThread] shutting down replication subsystems'
>>> output_line1 = mongodb(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'component': 'REPL',
'context': '[signalProcessingThread]',
'message': 'shutting down replication subsystems',
'severity': 'I',
'timestamp': '2017-08-17T07:56:33.489+0200'},
'timestamp': '2017-08-17T07:56:33.489+0200',
'type': 'log'}
>>> input_line2 = '2017-08-17T07:56:33.515+0200 W NETWORK [initandlisten] No primary detected for set confsvr_repl1'
>>> output_line2 = mongodb(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'component': 'NETWORK',
'context': '[initandlisten]',
'message': 'No primary detected for set confsvr_repl1',
'severity': 'W',
'timestamp': '2017-08-17T07:56:33.515+0200'},
'timestamp': '2017-08-17T07:56:33.515+0200',
'type': 'log'}
### Response:
def mongodb(line):
'''
>>> import pprint
>>> input_line1 = '2017-08-17T07:56:33.489+0200 I REPL [signalProcessingThread] shutting down replication subsystems'
>>> output_line1 = mongodb(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'component': 'REPL',
'context': '[signalProcessingThread]',
'message': 'shutting down replication subsystems',
'severity': 'I',
'timestamp': '2017-08-17T07:56:33.489+0200'},
'timestamp': '2017-08-17T07:56:33.489+0200',
'type': 'log'}
>>> input_line2 = '2017-08-17T07:56:33.515+0200 W NETWORK [initandlisten] No primary detected for set confsvr_repl1'
>>> output_line2 = mongodb(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'component': 'NETWORK',
'context': '[initandlisten]',
'message': 'No primary detected for set confsvr_repl1',
'severity': 'W',
'timestamp': '2017-08-17T07:56:33.515+0200'},
'timestamp': '2017-08-17T07:56:33.515+0200',
'type': 'log'}
'''
keys = ['timestamp', 'severity', 'component', 'context', 'message']
values = re.split(r'\s+', line, maxsplit=4)
mongodb_log = dict(zip(keys,values))
return dict(
timestamp=values[0],
data=mongodb_log,
type='log',
) |
def fromXml(cls, xparent):
"""
Loads the settings for this dataset to the inputted parent xml.
:param xparent | <xml.etree.ElementTree.Element>
"""
output = cls()
for xentry in xparent:
key = xentry.get('key')
if not key:
continue
typ = xentry.get('type', 'str')
if typ in DataSet._xmlTypes:
value = DataSet._xmlTypes[typ][1](xentry)
else:
value = xentry.get('value', '')
output.define(key, value)
return output | Loads the settings for this dataset to the inputted parent xml.
:param xparent | <xml.etree.ElementTree.Element> | Below is the instruction that describes the task:
### Input:
Loads the settings for this dataset to the inputted parent xml.
:param xparent | <xml.etree.ElementTree.Element>
### Response:
def fromXml(cls, xparent):
"""
Loads the settings for this dataset to the inputted parent xml.
:param xparent | <xml.etree.ElementTree.Element>
"""
output = cls()
for xentry in xparent:
key = xentry.get('key')
if not key:
continue
typ = xentry.get('type', 'str')
if typ in DataSet._xmlTypes:
value = DataSet._xmlTypes[typ][1](xentry)
else:
value = xentry.get('value', '')
output.define(key, value)
return output |
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None | Get action type.
:param action:
:return: | Below is the instruction that describes the task:
### Input:
Get action type.
:param action:
:return:
### Response:
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None |
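A standalone sketch of the same parsing logic; `CALL_TYPE` is assumed to be 'module' here purely for illustration, since the real class defines its own value.

```python
CALL_TYPE = 'module'  # assumption for this sketch only


def get_action_type(action):
    # Same logic as the method above, without the class context.
    action_name = next(iter(action or {'': None}))
    if ':' not in action_name:
        action_name = '{}:{}'.format(CALL_TYPE, action_name)
    return action_name.split(':')[0] or None


print(get_action_type({'cmd.run': {'name': 'ls'}}))   # 'module' (no explicit type prefix)
print(get_action_type({'state:pkg.installed': {}}))   # 'state'  (explicit type prefix)
```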
def start(self):
"""Start the process with daemonization.
If the process is already started this call should exit with code
ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and then
call 'run'.
"""
if self.pid is not None:
LOG.error(
"The process is already running with pid {0}.".format(self.pid)
)
sys.exit(exit.ALREADY_RUNNING)
self.daemonize()
LOG.info("Beginning run loop for process.")
try:
self.run()
except Exception:
LOG.exception("Uncaught exception in the daemon run() method.")
self.stop()
sys.exit(exit.RUN_FAILURE) | Start the process with daemonization.
If the process is already started this call should exit with code
ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and then
call 'run'. | Below is the instruction that describes the task:
### Input:
Start the process with daemonization.
If the process is already started this call should exit with code
ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and then
call 'run'.
### Response:
def start(self):
"""Start the process with daemonization.
If the process is already started this call should exit with code
ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and then
call 'run'.
"""
if self.pid is not None:
LOG.error(
"The process is already running with pid {0}.".format(self.pid)
)
sys.exit(exit.ALREADY_RUNNING)
self.daemonize()
LOG.info("Beginning run loop for process.")
try:
self.run()
except Exception:
LOG.exception("Uncaught exception in the daemon run() method.")
self.stop()
sys.exit(exit.RUN_FAILURE) |
def _string_hash(s):
"""String hash (djb2) with consistency between py2/py3 and persistency between runs (unlike `hash`)."""
h = 5381
for c in s:
h = h * 33 + ord(c)
return h | String hash (djb2) with consistency between py2/py3 and persistency between runs (unlike `hash`). | Below is the instruction that describes the task:
### Input:
String hash (djb2) with consistency between py2/py3 and persistency between runs (unlike `hash`).
### Response:
def _string_hash(s):
"""String hash (djb2) with consistency between py2/py3 and persistency between runs (unlike `hash`)."""
h = 5381
for c in s:
h = h * 33 + ord(c)
return h |
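A quick hand-check of the djb2 recurrence, assuming `_string_hash` above is in scope; it shows why the result stays stable across runs while the built-in `hash()` for strings is salted per process.

```python
h = 5381
for c in "abc":
    h = h * 33 + ord(c)
print(h)  # 193485963, the same on every run and interpreter

# _string_hash("abc") == 193485963, whereas hash("abc") generally differs between
# processes because of Python's string-hash randomization.
```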
def is_middleware(func) -> bool:
"""
test whether it is a middleware
:return: Boolean
"""
if inspect.isfunction(func):
_check = func
_name = func.__name__
else:
_check = func.__call__
_name = func.__class__.__name__
if not inspect.iscoroutinefunction(_check):
raise UnknownMiddlewareException("Middleware {} should be async function".format(_name))
args = list(inspect.signature(_check).parameters.keys())
if set(args) - MIDDLEWARE_PARAMETER_BOUNDARY:
raise UnknownMiddlewareException("Parameters of middleware {} "
"must be in list ['app', 'request', 'response', 'next']".format(_name))
return True | test whether it is a middleware
:return: Boolean | Below is the instruction that describes the task:
### Input:
test whether it is a middleware
:return: Boolean
### Response:
def is_middleware(func) -> bool:
"""
test whether it is a middleware
:return: Boolean
"""
if inspect.isfunction(func):
_check = func
_name = func.__name__
else:
_check = func.__call__
_name = func.__class__.__name__
if not inspect.iscoroutinefunction(_check):
raise UnknownMiddlewareException("Middleware {} should be async function".format(_name))
args = list(inspect.signature(_check).parameters.keys())
if set(args) - MIDDLEWARE_PARAMETER_BOUNDARY:
raise UnknownMiddlewareException("Parameters of middleware {} "
"must be in list ['app', 'request', 'response', 'next']".format(_name))
return True |
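To show what the validator accepts, here is a self-contained sketch. `MIDDLEWARE_PARAMETER_BOUNDARY` and `UnknownMiddlewareException` are not shown in this row, so the allowed-name set below is an assumption inferred from the error message.

```python
import inspect

# Assumed stand-in for the allowed-parameter set referenced above.
MIDDLEWARE_PARAMETER_BOUNDARY = {'app', 'request', 'response', 'next'}


async def ok_middleware(request, response, next):
    await next()


def bad_middleware(request):  # plain function, not async: would be rejected
    return None


print(inspect.iscoroutinefunction(ok_middleware))    # True
print(inspect.iscoroutinefunction(bad_middleware))   # False
params = set(inspect.signature(ok_middleware).parameters.keys())
print(params - MIDDLEWARE_PARAMETER_BOUNDARY)        # set(), i.e. no illegal parameter names
```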
def stage_tc_batch(self, owner, staging_data):
"""Stage data in ThreatConnect Platform using batch API.
Args:
owner (str): The ThreatConnect owner to submit batch job.
staging_data (dict): A dict of ThreatConnect batch data.
"""
batch = self.tcex.batch(owner)
for group in staging_data.get('group') or []:
# add to redis
variable = group.pop('variable', None)
path = group.pop('path', None)
data = self.path_data(group, path)
# update group data
if group.get('xid') is None:
# add xid if one doesn't exist
group['xid'] = self.stage_tc_batch_xid(group.get('type'), group.get('name'), owner)
# add owner name
group['ownerName'] = owner
# add to batch
batch.add_group(group)
# create tcentity
if variable is not None and data is not None:
self.stage_redis(variable, self.stage_tc_group_entity(data))
for indicator in staging_data.get('indicator') or []:
# add to redis
variable = indicator.pop('variable', None)
path = indicator.pop('path', None)
if indicator.get('xid') is None:
indicator['xid'] = self.stage_tc_batch_xid(
indicator.get('type'), indicator.get('summary'), owner
)
indicator['ownerName'] = owner
# add to batch after extra data has been popped
batch.add_indicator(indicator)
data = self.path_data(dict(indicator), path)
if variable is not None and data is not None:
# if isinstance(data, (dict)):
# tcentity uses value as the name
# data['value'] = data.pop('summary')
self.stage_redis(variable, self.stage_tc_indicator_entity(data))
# submit batch
batch_results = batch.submit()
self.log.debug('[stage] Batch Results: {}'.format(batch_results))
for error in batch_results.get('errors') or []:
self.log.error('[stage] {}'.format(error)) | Stage data in ThreatConnect Platform using batch API.
Args:
owner (str): The ThreatConnect owner to submit batch job.
staging_data (dict): A dict of ThreatConnect batch data. | Below is the instruction that describes the task:
### Input:
Stage data in ThreatConnect Platform using batch API.
Args:
owner (str): The ThreatConnect owner to submit batch job.
staging_data (dict): A dict of ThreatConnect batch data.
### Response:
def stage_tc_batch(self, owner, staging_data):
"""Stage data in ThreatConnect Platform using batch API.
Args:
owner (str): The ThreatConnect owner to submit batch job.
staging_data (dict): A dict of ThreatConnect batch data.
"""
batch = self.tcex.batch(owner)
for group in staging_data.get('group') or []:
# add to redis
variable = group.pop('variable', None)
path = group.pop('path', None)
data = self.path_data(group, path)
# update group data
if group.get('xid') is None:
# add xid if one doesn't exist
group['xid'] = self.stage_tc_batch_xid(group.get('type'), group.get('name'), owner)
# add owner name
group['ownerName'] = owner
# add to batch
batch.add_group(group)
# create tcentity
if variable is not None and data is not None:
self.stage_redis(variable, self.stage_tc_group_entity(data))
for indicator in staging_data.get('indicator') or []:
# add to redis
variable = indicator.pop('variable', None)
path = indicator.pop('path', None)
if indicator.get('xid') is None:
indicator['xid'] = self.stage_tc_batch_xid(
indicator.get('type'), indicator.get('summary'), owner
)
indicator['ownerName'] = owner
# add to batch after extra data has been popped
batch.add_indicator(indicator)
data = self.path_data(dict(indicator), path)
if variable is not None and data is not None:
# if isinstance(data, (dict)):
# tcentity uses value as the name
# data['value'] = data.pop('summary')
self.stage_redis(variable, self.stage_tc_indicator_entity(data))
# submit batch
batch_results = batch.submit()
self.log.debug('[stage] Batch Results: {}'.format(batch_results))
for error in batch_results.get('errors') or []:
self.log.error('[stage] {}'.format(error)) |
def plot_cap_exposures_longshort(long_exposures, short_exposures, ax=None):
"""
Plots outputs of compute_cap_exposures as area charts
Parameters
----------
long_exposures, short_exposures : arrays
Arrays of long and short market cap exposures (output of
compute_cap_exposures).
"""
if ax is None:
ax = plt.gca()
color_list = plt.cm.gist_rainbow(np.linspace(0, 1, 5))
ax.stackplot(long_exposures[0].index, long_exposures,
labels=CAP_BUCKETS.keys(), colors=color_list, alpha=0.8,
baseline='zero')
ax.stackplot(long_exposures[0].index, short_exposures, colors=color_list,
alpha=0.8, baseline='zero')
ax.axhline(0, color='k', linestyle='-')
ax.set(title='Long and short exposures to market caps',
ylabel='Proportion of long/short exposure in market cap buckets')
ax.legend(loc='upper left', frameon=True, framealpha=0.5)
return ax | Plots outputs of compute_cap_exposures as area charts
Parameters
----------
long_exposures, short_exposures : arrays
Arrays of long and short market cap exposures (output of
compute_cap_exposures). | Below is the instruction that describes the task:
### Input:
Plots outputs of compute_cap_exposures as area charts
Parameters
----------
long_exposures, short_exposures : arrays
Arrays of long and short market cap exposures (output of
compute_cap_exposures).
### Response:
def plot_cap_exposures_longshort(long_exposures, short_exposures, ax=None):
"""
Plots outputs of compute_cap_exposures as area charts
Parameters
----------
long_exposures, short_exposures : arrays
Arrays of long and short market cap exposures (output of
compute_cap_exposures).
"""
if ax is None:
ax = plt.gca()
color_list = plt.cm.gist_rainbow(np.linspace(0, 1, 5))
ax.stackplot(long_exposures[0].index, long_exposures,
labels=CAP_BUCKETS.keys(), colors=color_list, alpha=0.8,
baseline='zero')
ax.stackplot(long_exposures[0].index, short_exposures, colors=color_list,
alpha=0.8, baseline='zero')
ax.axhline(0, color='k', linestyle='-')
ax.set(title='Long and short exposures to market caps',
ylabel='Proportion of long/short exposure in market cap buckets')
ax.legend(loc='upper left', frameon=True, framealpha=0.5)
return ax |
def p_namespace(p):
""" asm : NAMESPACE ID
"""
global NAMESPACE
NAMESPACE = normalize_namespace(p[2])
__DEBUG__('Setting namespace to ' + (NAMESPACE.rstrip(DOT) or DOT), level=1) | asm : NAMESPACE ID | Below is the instruction that describes the task:
### Input:
asm : NAMESPACE ID
### Response:
def p_namespace(p):
""" asm : NAMESPACE ID
"""
global NAMESPACE
NAMESPACE = normalize_namespace(p[2])
__DEBUG__('Setting namespace to ' + (NAMESPACE.rstrip(DOT) or DOT), level=1) |
def get_appstruct(self):
""" return list of tuples keys and values corresponding to this model's
data """
result = []
for k in self._get_keys():
result.append((k, getattr(self, k)))
return result | return list of tuples keys and values corresponding to this model's
data | Below is the instruction that describes the task:
### Input:
return list of tuples keys and values corresponding to this model's
data
### Response:
def get_appstruct(self):
""" return list of tuples keys and values corresponding to this model's
data """
result = []
for k in self._get_keys():
result.append((k, getattr(self, k)))
return result |
def _self_referential_fk(klass_model):
"""
Return whether this model has a self ref FK, and the name for the field
"""
for f in klass_model._meta.concrete_fields:
if f.related_model:
if issubclass(klass_model, f.related_model):
return f.attname
return None | Return whether this model has a self ref FK, and the name for the field | Below is the instruction that describes the task:
### Input:
Return whether this model has a self ref FK, and the name for the field
### Response:
def _self_referential_fk(klass_model):
"""
Return whether this model has a self ref FK, and the name for the field
"""
for f in klass_model._meta.concrete_fields:
if f.related_model:
if issubclass(klass_model, f.related_model):
return f.attname
return None |
def parse_command_line() -> Namespace:
"""Parse command line options and set them to ``config``.
This function skips unknown command line options. After parsing options,
set log level and set options in ``tornado.options``.
"""
import tornado.options
parser.parse_known_args(namespace=config)
set_loglevel() # set new log level based on command line option
for k, v in vars(config).items():
if k.startswith('log'):
tornado.options.options.__setattr__(k, v)
return config | Parse command line options and set them to ``config``.
This function skips unknown command line options. After parsing options,
set log level and set options in ``tornado.options``. | Below is the instruction that describes the task:
### Input:
Parse command line options and set them to ``config``.
This function skips unknown command line options. After parsing options,
set log level and set options in ``tornado.options``.
### Response:
def parse_command_line() -> Namespace:
"""Parse command line options and set them to ``config``.
This function skips unknown command line options. After parsing options,
set log level and set options in ``tornado.options``.
"""
import tornado.options
parser.parse_known_args(namespace=config)
set_loglevel() # set new log level based on command line option
for k, v in vars(config).items():
if k.startswith('log'):
tornado.options.options.__setattr__(k, v)
return config |
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim) | Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str. | Below is the instruction that describes the task:
### Input:
Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
### Response:
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim) |
def disconnect(self):
"""
Disconnects the tree connection.
"""
if not self._connected:
return
log.info("Session: %s, Tree: %s - Disconnecting from Tree Connect"
% (self.session.username, self.share_name))
req = SMB2TreeDisconnect()
log.info("Session: %s, Tree: %s - Sending Tree Disconnect message"
% (self.session.username, self.share_name))
log.debug(str(req))
request = self.session.connection.send(req,
sid=self.session.session_id,
tid=self.tree_connect_id)
log.info("Session: %s, Tree: %s - Receiving Tree Disconnect response"
% (self.session.username, self.share_name))
res = self.session.connection.receive(request)
res_disconnect = SMB2TreeDisconnect()
res_disconnect.unpack(res['data'].get_value())
log.debug(str(res_disconnect))
self._connected = False
del self.session.tree_connect_table[self.tree_connect_id] | Disconnects the tree connection. | Below is the instruction that describes the task:
### Input:
Disconnects the tree connection.
### Response:
def disconnect(self):
"""
Disconnects the tree connection.
"""
if not self._connected:
return
log.info("Session: %s, Tree: %s - Disconnecting from Tree Connect"
% (self.session.username, self.share_name))
req = SMB2TreeDisconnect()
log.info("Session: %s, Tree: %s - Sending Tree Disconnect message"
% (self.session.username, self.share_name))
log.debug(str(req))
request = self.session.connection.send(req,
sid=self.session.session_id,
tid=self.tree_connect_id)
log.info("Session: %s, Tree: %s - Receiving Tree Disconnect response"
% (self.session.username, self.share_name))
res = self.session.connection.receive(request)
res_disconnect = SMB2TreeDisconnect()
res_disconnect.unpack(res['data'].get_value())
log.debug(str(res_disconnect))
self._connected = False
del self.session.tree_connect_table[self.tree_connect_id] |
def RegisterHasher(cls, hasher_class):
"""Registers a hasher class.
The hasher classes are identified based on their lower case name.
Args:
hasher_class (type): class object of the hasher.
Raises:
KeyError: if hasher class is already set for the corresponding name.
"""
hasher_name = hasher_class.NAME.lower()
if hasher_name in cls._hasher_classes:
raise KeyError((
'hasher class already set for name: {0:s}.').format(
hasher_class.NAME))
cls._hasher_classes[hasher_name] = hasher_class | Registers a hasher class.
The hasher classes are identified based on their lower case name.
Args:
hasher_class (type): class object of the hasher.
Raises:
KeyError: if hasher class is already set for the corresponding name. | Below is the instruction that describes the task:
### Input:
Registers a hasher class.
The hasher classes are identified based on their lower case name.
Args:
hasher_class (type): class object of the hasher.
Raises:
KeyError: if hasher class is already set for the corresponding name.
### Response:
def RegisterHasher(cls, hasher_class):
"""Registers a hasher class.
The hasher classes are identified based on their lower case name.
Args:
hasher_class (type): class object of the hasher.
Raises:
KeyError: if hasher class is already set for the corresponding name.
"""
hasher_name = hasher_class.NAME.lower()
if hasher_name in cls._hasher_classes:
raise KeyError((
'hasher class already set for name: {0:s}.').format(
hasher_class.NAME))
cls._hasher_classes[hasher_name] = hasher_class |
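The same lower-cased, name-keyed registry idea in a standalone form; the hasher class below is hypothetical, and the real manager keeps the dict as a class attribute as shown above.

```python
_hasher_classes = {}


class Md5Hasher:  # hypothetical hasher class for illustration
    NAME = 'MD5'


def register_hasher(hasher_class):
    name = hasher_class.NAME.lower()
    if name in _hasher_classes:
        raise KeyError('hasher class already set for name: {0:s}.'.format(hasher_class.NAME))
    _hasher_classes[name] = hasher_class


register_hasher(Md5Hasher)
print(_hasher_classes)  # {'md5': <class '__main__.Md5Hasher'>} when run as a script
# Registering Md5Hasher a second time would raise the KeyError above.
```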
def _shorten_render(renderer, max_len):
"""Return a modified renderer that returns the representation of expr, or '...' if
that representation is longer than `max_len`"""
def short_renderer(expr):
res = renderer(expr)
if len(res) > max_len:
return '...'
else:
return res
return short_renderer | Return a modified renderer that returns the representation of expr, or '...' if
that representation is longer than `max_len` | Below is the instruction that describes the task:
### Input:
Return a modified renderer that returns the representation of expr, or '...' if
that representation is longer than `max_len`
### Response:
def _shorten_render(renderer, max_len):
"""Return a modified renderer that returns the representation of expr, or '...' if
that representation is longer than `max_len`"""
def short_renderer(expr):
res = renderer(expr)
if len(res) > max_len:
return '...'
else:
return res
return short_renderer |
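Usage sketch, assuming `_shorten_render` above is in scope; any callable that maps an expression to a string works as the renderer, and `repr` is used here only for illustration.

```python
short_repr = _shorten_render(repr, max_len=12)
print(short_repr([1, 2, 3]))          # '[1, 2, 3]'  (9 characters, kept as-is)
print(short_repr(list(range(100))))   # '...'        (representation too long)
```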
def set_chassis_location(location,
host=None,
admin_username=None,
admin_password=None):
'''
Set the location of the chassis.
location
The name of the location to be set on the chassis.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.set_chassis_location location-name host=111.222.333.444
admin_username=root admin_password=secret
'''
return __execute_cmd('setsysinfo -c chassislocation {0}'.format(location),
host=host, admin_username=admin_username,
admin_password=admin_password) | Set the location of the chassis.
location
The name of the location to be set on the chassis.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.set_chassis_location location-name host=111.222.333.444
admin_username=root admin_password=secret | Below is the instruction that describes the task:
### Input:
Set the location of the chassis.
location
The name of the location to be set on the chassis.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.set_chassis_location location-name host=111.222.333.444
admin_username=root admin_password=secret
### Response:
def set_chassis_location(location,
host=None,
admin_username=None,
admin_password=None):
'''
Set the location of the chassis.
location
The name of the location to be set on the chassis.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.set_chassis_location location-name host=111.222.333.444
admin_username=root admin_password=secret
'''
return __execute_cmd('setsysinfo -c chassislocation {0}'.format(location),
host=host, admin_username=admin_username,
admin_password=admin_password) |
def bootstrap_results(self, init_state):
"""Returns an object with the same type as returned by `one_step`.
Args:
init_state: `Tensor` or Python `list` of `Tensor`s representing the
initial state(s) of the Markov chain(s).
Returns:
kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
`Tensor`s representing internal calculations made within this function.
This includes replica states.
"""
with tf.compat.v1.name_scope(
name=mcmc_util.make_name(self.name, 'remc', 'bootstrap_results'),
values=[init_state]):
replica_results = [
self.replica_kernels[i].bootstrap_results(init_state)
for i in range(self.num_replica)
]
init_state_parts = (
list(init_state)
if mcmc_util.is_list_like(init_state) else [init_state])
# Convert all states parts to tensor...
replica_states = [[
tf.convert_to_tensor(value=s) for s in init_state_parts
] for i in range(self.num_replica)]
if not mcmc_util.is_list_like(init_state):
replica_states = [s[0] for s in replica_states]
return ReplicaExchangeMCKernelResults(
replica_states=replica_states,
replica_results=replica_results,
sampled_replica_states=replica_states,
sampled_replica_results=replica_results,
) | Returns an object with the same type as returned by `one_step`.
Args:
init_state: `Tensor` or Python `list` of `Tensor`s representing the
initial state(s) of the Markov chain(s).
Returns:
kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
`Tensor`s representing internal calculations made within this function.
This includes replica states. | Below is the instruction that describes the task:
### Input:
Returns an object with the same type as returned by `one_step`.
Args:
init_state: `Tensor` or Python `list` of `Tensor`s representing the
initial state(s) of the Markov chain(s).
Returns:
kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
`Tensor`s representing internal calculations made within this function.
This includes replica states.
### Response:
def bootstrap_results(self, init_state):
"""Returns an object with the same type as returned by `one_step`.
Args:
init_state: `Tensor` or Python `list` of `Tensor`s representing the
initial state(s) of the Markov chain(s).
Returns:
kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
`Tensor`s representing internal calculations made within this function.
        This includes replica states.
"""
with tf.compat.v1.name_scope(
name=mcmc_util.make_name(self.name, 'remc', 'bootstrap_results'),
values=[init_state]):
replica_results = [
self.replica_kernels[i].bootstrap_results(init_state)
for i in range(self.num_replica)
]
init_state_parts = (
list(init_state)
if mcmc_util.is_list_like(init_state) else [init_state])
# Convert all states parts to tensor...
replica_states = [[
tf.convert_to_tensor(value=s) for s in init_state_parts
] for i in range(self.num_replica)]
if not mcmc_util.is_list_like(init_state):
replica_states = [s[0] for s in replica_states]
return ReplicaExchangeMCKernelResults(
replica_states=replica_states,
replica_results=replica_results,
sampled_replica_states=replica_states,
sampled_replica_results=replica_results,
) |
def _map_trajectory(self):
""" Return filepath as a class attribute"""
self.trajectory_map = {}
with open(self.filepath, 'r') as trajectory_file:
with closing(
mmap(
trajectory_file.fileno(), 0,
access=ACCESS_READ)) as mapped_file:
progress = 0
line = 0
frame = -1
frame_start = 0
while progress <= len(mapped_file):
line = line + 1
# We read a binary data from a mapped file.
bline = mapped_file.readline()
# If the bline length equals zero we terminate.
# We reached end of the file but still add the last frame!
if len(bline) == 0:
frame = frame + 1
if progress - frame_start > 10:
self.trajectory_map[frame] = [
frame_start, progress
]
break
# We need to decode byte line into an utf-8 string.
sline = bline.decode("utf-8").strip('\n').split()
# We extract map's byte coordinates for each frame
if len(sline) == 1 and sline[0] == 'END':
frame = frame + 1
self.trajectory_map[frame] = [frame_start, progress]
frame_start = progress
# Here we extract the map's byte coordinates for the header
# And also the periodic system type needed for later.
progress = progress + len(bline)
                self.no_of_frames = frame | Return filepath as a class attribute | Below is the instruction that describes the task:
### Input:
Return filepath as a class attribute
### Response:
def _map_trajectory(self):
""" Return filepath as a class attribute"""
self.trajectory_map = {}
with open(self.filepath, 'r') as trajectory_file:
with closing(
mmap(
trajectory_file.fileno(), 0,
access=ACCESS_READ)) as mapped_file:
progress = 0
line = 0
frame = -1
frame_start = 0
while progress <= len(mapped_file):
line = line + 1
# We read a binary data from a mapped file.
bline = mapped_file.readline()
# If the bline length equals zero we terminate.
# We reached end of the file but still add the last frame!
if len(bline) == 0:
frame = frame + 1
if progress - frame_start > 10:
self.trajectory_map[frame] = [
frame_start, progress
]
break
# We need to decode byte line into an utf-8 string.
sline = bline.decode("utf-8").strip('\n').split()
# We extract map's byte coordinates for each frame
if len(sline) == 1 and sline[0] == 'END':
frame = frame + 1
self.trajectory_map[frame] = [frame_start, progress]
frame_start = progress
# Here we extract the map's byte coordinates for the header
# And also the periodic system type needed for later.
progress = progress + len(bline)
self.no_of_frames = frame |
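A hedged sketch of how a byte-offset map like the one built above is typically consumed: seek straight to one frame's bytes instead of rereading the whole file. read_frame is a hypothetical helper, not part of the class above.

def read_frame(filepath, trajectory_map, frame):
    # trajectory_map has the {frame: [start_byte, end_byte]} shape built above.
    start, end = trajectory_map[frame]
    with open(filepath, 'rb') as fh:
        fh.seek(start)
        return fh.read(end - start).decode('utf-8')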
def kill(args):
"""
%prog kill [options] JOBNAMEPAT/JOBIDs
Kill jobs based on JOBNAME pattern matching (case-sensitive)
or list of JOBIDs (comma separated)
Examples:
%prog kill "pyth*" # Use regex
%prog kill 160253,160245,160252 # Use list of job ids
%prog kill all # Everything
"""
import shlex
from jcvi.apps.base import sh, getusername
from subprocess import check_output, CalledProcessError
import xml.etree.ElementTree as ET
valid_methods = ("pattern", "jobid")
p = OptionParser(kill.__doc__)
p.add_option("--method", choices=valid_methods,
help="Identify jobs based on [default: guess]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
username = getusername()
tag, = args
tag = tag.strip()
if tag == "all":
sh("qdel -u {0}".format(username))
return
valid_jobids = set()
method = opts.method or guess_method(tag)
if method == "jobid":
jobids = tag.split(",")
valid_jobids |= set(jobids)
elif method == "pattern":
qsxmlcmd = 'qstat -u "{0}" -j "{1}" -nenv -njd -xml'.\
format(username, tag)
try:
qsxml = check_output(shlex.split(qsxmlcmd)).strip()
except CalledProcessError as e:
qsxml = None
logging.debug('No jobs matching the pattern "{0}"'.format(tag))
if qsxml is not None:
for job in ET.fromstring(qsxml).findall("djob_info"):
for elem in job.findall("element"):
jobid = elem.find("JB_job_number").text
valid_jobids.add(jobid)
if valid_jobids:
sh("qdel {0}".format(",".join(valid_jobids))) | %prog kill [options] JOBNAMEPAT/JOBIDs
Kill jobs based on JOBNAME pattern matching (case-sensitive)
or list of JOBIDs (comma separated)
Examples:
%prog kill "pyth*" # Use regex
%prog kill 160253,160245,160252 # Use list of job ids
    %prog kill all # Everything | Below is the instruction that describes the task:
### Input:
%prog kill [options] JOBNAMEPAT/JOBIDs
Kill jobs based on JOBNAME pattern matching (case-sensitive)
or list of JOBIDs (comma separated)
Examples:
%prog kill "pyth*" # Use regex
%prog kill 160253,160245,160252 # Use list of job ids
%prog kill all # Everything
### Response:
def kill(args):
"""
%prog kill [options] JOBNAMEPAT/JOBIDs
Kill jobs based on JOBNAME pattern matching (case-sensitive)
or list of JOBIDs (comma separated)
Examples:
%prog kill "pyth*" # Use regex
%prog kill 160253,160245,160252 # Use list of job ids
%prog kill all # Everything
"""
import shlex
from jcvi.apps.base import sh, getusername
from subprocess import check_output, CalledProcessError
import xml.etree.ElementTree as ET
valid_methods = ("pattern", "jobid")
p = OptionParser(kill.__doc__)
p.add_option("--method", choices=valid_methods,
help="Identify jobs based on [default: guess]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
username = getusername()
tag, = args
tag = tag.strip()
if tag == "all":
sh("qdel -u {0}".format(username))
return
valid_jobids = set()
method = opts.method or guess_method(tag)
if method == "jobid":
jobids = tag.split(",")
valid_jobids |= set(jobids)
elif method == "pattern":
qsxmlcmd = 'qstat -u "{0}" -j "{1}" -nenv -njd -xml'.\
format(username, tag)
try:
qsxml = check_output(shlex.split(qsxmlcmd)).strip()
except CalledProcessError as e:
qsxml = None
logging.debug('No jobs matching the pattern "{0}"'.format(tag))
if qsxml is not None:
for job in ET.fromstring(qsxml).findall("djob_info"):
for elem in job.findall("element"):
jobid = elem.find("JB_job_number").text
valid_jobids.add(jobid)
if valid_jobids:
sh("qdel {0}".format(",".join(valid_jobids))) |
def copy(
self,
extractor=None,
needs=None,
store=None,
data_writer=None,
persistence=None,
extractor_args=None):
"""
Use self as a template to build a new feature, replacing
values in kwargs
"""
f = Feature(
extractor or self.extractor,
needs=needs,
store=self.store if store is None else store,
encoder=self.encoder,
decoder=self.decoder,
key=self.key,
data_writer=data_writer,
persistence=persistence,
**(extractor_args or self.extractor_args))
f._fixup_needs()
return f | Use self as a template to build a new feature, replacing
        values in kwargs | Below is the instruction that describes the task:
### Input:
Use self as a template to build a new feature, replacing
values in kwargs
### Response:
def copy(
self,
extractor=None,
needs=None,
store=None,
data_writer=None,
persistence=None,
extractor_args=None):
"""
Use self as a template to build a new feature, replacing
values in kwargs
"""
f = Feature(
extractor or self.extractor,
needs=needs,
store=self.store if store is None else store,
encoder=self.encoder,
decoder=self.decoder,
key=self.key,
data_writer=data_writer,
persistence=persistence,
**(extractor_args or self.extractor_args))
f._fixup_needs()
return f |
def get_save_as_filename(defaultfilename: str,
defaultextension: str,
title: str = "Save As") -> str:
"""
Provides a GUI "Save As" dialogue (via ``tkinter``) and returns the
filename.
"""
root = tkinter.Tk() # create and get Tk topmost window
# (don't do this too early; the command prompt loses focus)
root.withdraw() # won't need this; this gets rid of a blank Tk window
root.attributes('-topmost', True) # makes the tk window topmost
filename = filedialog.asksaveasfilename(
initialfile=defaultfilename,
defaultextension=defaultextension,
parent=root,
title=title
)
root.attributes('-topmost', False) # stop the tk window being topmost
return filename | Provides a GUI "Save As" dialogue (via ``tkinter``) and returns the
    filename. | Below is the instruction that describes the task:
### Input:
Provides a GUI "Save As" dialogue (via ``tkinter``) and returns the
filename.
### Response:
def get_save_as_filename(defaultfilename: str,
defaultextension: str,
title: str = "Save As") -> str:
"""
Provides a GUI "Save As" dialogue (via ``tkinter``) and returns the
filename.
"""
root = tkinter.Tk() # create and get Tk topmost window
# (don't do this too early; the command prompt loses focus)
root.withdraw() # won't need this; this gets rid of a blank Tk window
root.attributes('-topmost', True) # makes the tk window topmost
filename = filedialog.asksaveasfilename(
initialfile=defaultfilename,
defaultextension=defaultextension,
parent=root,
title=title
)
root.attributes('-topmost', False) # stop the tk window being topmost
return filename |
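Assuming the function above is in scope, a typical call looks like the sketch below; tkinter's dialogue normally returns an empty string when the user cancels, so the result should be checked before use. The arguments are made-up examples.

path = get_save_as_filename("results.csv", ".csv", title="Export data")
if path:
    print("Saving to", path)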
def extrusion(target, throat_perimeter='throat.perimeter',
throat_length='throat.length'):
r"""
    Calculate surface area for an arbitrarily shaped throat given the perimeter
and length.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
throat_perimeter : string
Dictionary key to the throat perimeter array. Default is
'throat.perimeter'.
throat_length : string
Dictionary key to the throat length array. Default is 'throat.length'.
"""
P = target[throat_perimeter]
L = target[throat_length]
value = P*L
return value | r"""
    Calculate surface area for an arbitrarily shaped throat given the perimeter
and length.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
throat_perimeter : string
Dictionary key to the throat perimeter array. Default is
'throat.perimeter'.
throat_length : string
        Dictionary key to the throat length array. Default is 'throat.length'. | Below is the instruction that describes the task:
### Input:
r"""
    Calculate surface area for an arbitrarily shaped throat given the perimeter
and length.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
throat_perimeter : string
Dictionary key to the throat perimeter array. Default is
'throat.perimeter'.
throat_length : string
Dictionary key to the throat length array. Default is 'throat.length'.
### Response:
def extrusion(target, throat_perimeter='throat.perimeter',
throat_length='throat.length'):
r"""
    Calculate surface area for an arbitrarily shaped throat given the perimeter
and length.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
throat_perimeter : string
Dictionary key to the throat perimeter array. Default is
'throat.perimeter'.
throat_length : string
Dictionary key to the throat length array. Default is 'throat.length'.
"""
P = target[throat_perimeter]
L = target[throat_length]
value = P*L
return value |
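A small self-contained numerical check of the perimeter-times-length rule, using made-up dimensions rather than OpenPNM objects:

import numpy as np

perimeter = np.array([4e-5])  # square throat of side 1e-5 m -> perimeter 4e-5 m
length = np.array([1e-4])     # throat length of 1e-4 m
print(perimeter * length)     # [4.e-09] -> 4e-9 m^2 of lateral surface area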
def client_status(self, config_path):
"""Get status of client for a project, given path to its config."""
c = self.client_for(config_path)
status = "stopped"
if not c or not c.ensime:
status = 'unloaded'
elif c.ensime.is_ready():
status = 'ready'
elif c.ensime.is_running():
status = 'startup'
elif c.ensime.aborted():
status = 'aborted'
        return status | Get status of client for a project, given path to its config. | Below is the instruction that describes the task:
### Input:
Get status of client for a project, given path to its config.
### Response:
def client_status(self, config_path):
"""Get status of client for a project, given path to its config."""
c = self.client_for(config_path)
status = "stopped"
if not c or not c.ensime:
status = 'unloaded'
elif c.ensime.is_ready():
status = 'ready'
elif c.ensime.is_running():
status = 'startup'
elif c.ensime.aborted():
status = 'aborted'
return status |
def crop(self, top=None, bottom=None, right=None, left=None):
"""Crop image.
:param float top: fraction to crop from the top margin
:param float bottom: fraction to crop from the bottom margin
:param float left: fraction to crop from the left margin
:param float right: fraction to crop from the right margin
"""
extractVOI = vtk.vtkExtractVOI()
extractVOI.SetInputData(self.GetInput())
extractVOI.IncludeBoundaryOn()
d = self.GetInput().GetDimensions()
bx0, bx1, by0, by1 = 0, d[0]-1, 0, d[1]-1
if left is not None: bx0 = int((d[0]-1)*left)
if right is not None: bx1 = int((d[0]-1)*(1-right))
if bottom is not None: by0 = int((d[1]-1)*bottom)
if top is not None: by1 = int((d[1]-1)*(1-top))
extractVOI.SetVOI(bx0, bx1, by0, by1, 0, 0)
extractVOI.Update()
img = extractVOI.GetOutput()
#img.SetOrigin(-bx0, -by0, 0)
self.GetMapper().SetInputData(img)
self.GetMapper().Modified()
return self | Crop image.
:param float top: fraction to crop from the top margin
:param float bottom: fraction to crop from the bottom margin
:param float left: fraction to crop from the left margin
    :param float right: fraction to crop from the right margin | Below is the instruction that describes the task:
### Input:
Crop image.
:param float top: fraction to crop from the top margin
:param float bottom: fraction to crop from the bottom margin
:param float left: fraction to crop from the left margin
:param float right: fraction to crop from the right margin
### Response:
def crop(self, top=None, bottom=None, right=None, left=None):
"""Crop image.
:param float top: fraction to crop from the top margin
:param float bottom: fraction to crop from the bottom margin
:param float left: fraction to crop from the left margin
:param float right: fraction to crop from the right margin
"""
extractVOI = vtk.vtkExtractVOI()
extractVOI.SetInputData(self.GetInput())
extractVOI.IncludeBoundaryOn()
d = self.GetInput().GetDimensions()
bx0, bx1, by0, by1 = 0, d[0]-1, 0, d[1]-1
if left is not None: bx0 = int((d[0]-1)*left)
if right is not None: bx1 = int((d[0]-1)*(1-right))
if bottom is not None: by0 = int((d[1]-1)*bottom)
if top is not None: by1 = int((d[1]-1)*(1-top))
extractVOI.SetVOI(bx0, bx1, by0, by1, 0, 0)
extractVOI.Update()
img = extractVOI.GetOutput()
#img.SetOrigin(-bx0, -by0, 0)
self.GetMapper().SetInputData(img)
self.GetMapper().Modified()
return self |
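The fraction-to-voxel-index arithmetic used above can be checked in isolation; the 100x80 image dimensions below are made up for illustration.

d = (100, 80, 1)  # hypothetical image dimensions
left, right, bottom, top = 0.1, 0.2, 0.0, 0.25
bx0 = int((d[0] - 1) * left)         # 9
bx1 = int((d[0] - 1) * (1 - right))  # 79
by0 = int((d[1] - 1) * bottom)       # 0
by1 = int((d[1] - 1) * (1 - top))    # 59
print(bx0, bx1, by0, by1)            # the VOI handed to vtkExtractVOI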
def run(self):
"""Run the log monitor.
This will query Redis once every second to check if there are new log
files to monitor. It will also store those log files in Redis.
"""
while True:
self.update_log_filenames()
self.open_closed_files()
anything_published = self.check_log_files_and_publish_updates()
# If nothing was published, then wait a little bit before checking
# for logs to avoid using too much CPU.
if not anything_published:
time.sleep(0.05) | Run the log monitor.
This will query Redis once every second to check if there are new log
        files to monitor. It will also store those log files in Redis. | Below is the instruction that describes the task:
### Input:
Run the log monitor.
This will query Redis once every second to check if there are new log
files to monitor. It will also store those log files in Redis.
### Response:
def run(self):
"""Run the log monitor.
This will query Redis once every second to check if there are new log
files to monitor. It will also store those log files in Redis.
"""
while True:
self.update_log_filenames()
self.open_closed_files()
anything_published = self.check_log_files_and_publish_updates()
# If nothing was published, then wait a little bit before checking
# for logs to avoid using too much CPU.
if not anything_published:
time.sleep(0.05) |
def get_interface_detail_output_interface_line_protocol_exception_info(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
output = ET.SubElement(get_interface_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
line_protocol_exception_info = ET.SubElement(interface, "line-protocol-exception-info")
line_protocol_exception_info.text = kwargs.pop('line_protocol_exception_info')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_interface_detail_output_interface_line_protocol_exception_info(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
output = ET.SubElement(get_interface_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
line_protocol_exception_info = ET.SubElement(interface, "line-protocol-exception-info")
line_protocol_exception_info.text = kwargs.pop('line_protocol_exception_info')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
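For illustration only, this is roughly the XML skeleton the auto-generated method assembles; the interface type and name values below are placeholders, not defaults of the device API.

import xml.etree.ElementTree as ET

cfg = ET.Element("get_interface_detail")
iface = ET.SubElement(ET.SubElement(cfg, "output"), "interface")
ET.SubElement(iface, "interface-type").text = "tengigabitethernet"  # placeholder
ET.SubElement(iface, "interface-name").text = "1/0/1"               # placeholder
ET.SubElement(iface, "line-protocol-exception-info").text = ""
print(ET.tostring(cfg).decode())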
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
show_status_bar = CONF.get('main', 'show_status_bar')
self.statusBar().setVisible(show_status_bar)
if show_status_bar:
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
else:
            return | Update status bar widgets settings | Below is the instruction that describes the task:
### Input:
Update status bar widgets settings
### Response:
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
show_status_bar = CONF.get('main', 'show_status_bar')
self.statusBar().setVisible(show_status_bar)
if show_status_bar:
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
else:
return |
def setDirty(self, state=True):
"""
Flags the connection as being dirty and needing a rebuild.
:param state | <bool>
"""
self._dirty = state
# set if this connection should be visible
if self._inputNode and self._outputNode:
vis = self._inputNode.isVisible() and self._outputNode.isVisible()
self.setVisible(vis) | Flags the connection as being dirty and needing a rebuild.
        :param state | <bool> | Below is the instruction that describes the task:
### Input:
Flags the connection as being dirty and needing a rebuild.
:param state | <bool>
### Response:
def setDirty(self, state=True):
"""
Flags the connection as being dirty and needing a rebuild.
:param state | <bool>
"""
self._dirty = state
# set if this connection should be visible
if self._inputNode and self._outputNode:
vis = self._inputNode.isVisible() and self._outputNode.isVisible()
self.setVisible(vis) |
def insert_tabs(self, tab, no_tabs=1):
"""Adds no_tabs tabs before table, appends if tab > maxtabs
and marks grid as changed
"""
# Mark content as changed
post_command_event(self.main_window, self.ContentChangedMsg)
self.code_array.insert(tab, no_tabs, axis=2)
# Update TableChoiceIntCtrl
shape = self.grid.code_array.shape
post_command_event(self.main_window, self.ResizeGridMsg, shape=shape) | Adds no_tabs tabs before table, appends if tab > maxtabs
        and marks grid as changed | Below is the instruction that describes the task:
### Input:
Adds no_tabs tabs before table, appends if tab > maxtabs
and marks grid as changed
### Response:
def insert_tabs(self, tab, no_tabs=1):
"""Adds no_tabs tabs before table, appends if tab > maxtabs
and marks grid as changed
"""
# Mark content as changed
post_command_event(self.main_window, self.ContentChangedMsg)
self.code_array.insert(tab, no_tabs, axis=2)
# Update TableChoiceIntCtrl
shape = self.grid.code_array.shape
post_command_event(self.main_window, self.ResizeGridMsg, shape=shape) |
def cls_slots(self, cls: CLASS_OR_CLASSNAME) -> List[SlotDefinition]:
""" Return the list of slots directly included in the class definition. Includes slots whose
domain is cls -- as declared in slot.domain or class.slots
Does not include slots declared in mixins, apply_to or is_a links
@param cls: class name or class definition name
@return: all direct class slots
"""
if not isinstance(cls, ClassDefinition):
cls = self.schema.classes[cls]
return [self.schema.slots[s] for s in cls.slots] | Return the list of slots directly included in the class definition. Includes slots whose
domain is cls -- as declared in slot.domain or class.slots
Does not include slots declared in mixins, apply_to or is_a links
@param cls: class name or class definition name
        @return: all direct class slots | Below is the instruction that describes the task:
### Input:
Return the list of slots directly included in the class definition. Includes slots whose
domain is cls -- as declared in slot.domain or class.slots
Does not include slots declared in mixins, apply_to or is_a links
@param cls: class name or class definition name
@return: all direct class slots
### Response:
def cls_slots(self, cls: CLASS_OR_CLASSNAME) -> List[SlotDefinition]:
""" Return the list of slots directly included in the class definition. Includes slots whose
domain is cls -- as declared in slot.domain or class.slots
Does not include slots declared in mixins, apply_to or is_a links
@param cls: class name or class definition name
@return: all direct class slots
"""
if not isinstance(cls, ClassDefinition):
cls = self.schema.classes[cls]
return [self.schema.slots[s] for s in cls.slots] |
def precision(self, label=None):
"""
Returns precision or precision for a given label (category) if specified.
"""
if label is None:
return self.call("precision")
else:
return self.call("precision", float(label)) | Returns precision or precision for a given label (category) if specified. | Below is the the instruction that describes the task:
### Input:
Returns precision or precision for a given label (category) if specified.
### Response:
def precision(self, label=None):
"""
Returns precision or precision for a given label (category) if specified.
"""
if label is None:
return self.call("precision")
else:
return self.call("precision", float(label)) |
def set_empty_for_all(self, row_column_list):
"""Keep all specified subplots completely empty.
:param row_column_list: a list containing (row, column) tuples to
specify the subplots, or None to indicate *all* subplots.
:type row_column_list: list or None
"""
for row, column in row_column_list:
self.set_empty(row, column) | Keep all specified subplots completely empty.
:param row_column_list: a list containing (row, column) tuples to
specify the subplots, or None to indicate *all* subplots.
        :type row_column_list: list or None | Below is the instruction that describes the task:
### Input:
Keep all specified subplots completely empty.
:param row_column_list: a list containing (row, column) tuples to
specify the subplots, or None to indicate *all* subplots.
:type row_column_list: list or None
### Response:
def set_empty_for_all(self, row_column_list):
"""Keep all specified subplots completely empty.
:param row_column_list: a list containing (row, column) tuples to
specify the subplots, or None to indicate *all* subplots.
:type row_column_list: list or None
"""
for row, column in row_column_list:
self.set_empty(row, column) |
def handle_xmlrpc(self, request_text):
"""Handle a single XML-RPC request"""
response = self._marshaled_dispatch(request_text)
sys.stdout.write('Content-Type: text/xml\n')
sys.stdout.write('Content-Length: %d\n' % len(response))
sys.stdout.write('\n')
        sys.stdout.write(response) | Handle a single XML-RPC request | Below is the instruction that describes the task:
### Input:
Handle a single XML-RPC request
### Response:
def handle_xmlrpc(self, request_text):
"""Handle a single XML-RPC request"""
response = self._marshaled_dispatch(request_text)
sys.stdout.write('Content-Type: text/xml\n')
sys.stdout.write('Content-Length: %d\n' % len(response))
sys.stdout.write('\n')
sys.stdout.write(response) |
def lp10(self, subset_k, subset_p, weights={}):
"""Force reactions in K above epsilon while minimizing support of P.
This program forces reactions in subset K to attain flux > epsilon
while minimizing the sum of absolute flux values for reactions
in subset P (L1-regularization).
"""
if self._z is None:
self._add_minimization_vars()
positive = set(subset_k) - self._flipped
negative = set(subset_k) & self._flipped
v = self._v.set(positive)
cs = self._prob.add_linear_constraints(v >= self._epsilon)
self._temp_constr.extend(cs)
v = self._v.set(negative)
cs = self._prob.add_linear_constraints(v <= -self._epsilon)
self._temp_constr.extend(cs)
self._prob.set_objective(self._z.expr(
(rxnid, -weights.get(rxnid, 1)) for rxnid in subset_p))
self._solve() | Force reactions in K above epsilon while minimizing support of P.
This program forces reactions in subset K to attain flux > epsilon
while minimizing the sum of absolute flux values for reactions
        in subset P (L1-regularization). | Below is the instruction that describes the task:
### Input:
Force reactions in K above epsilon while minimizing support of P.
This program forces reactions in subset K to attain flux > epsilon
while minimizing the sum of absolute flux values for reactions
in subset P (L1-regularization).
### Response:
def lp10(self, subset_k, subset_p, weights={}):
"""Force reactions in K above epsilon while minimizing support of P.
This program forces reactions in subset K to attain flux > epsilon
while minimizing the sum of absolute flux values for reactions
in subset P (L1-regularization).
"""
if self._z is None:
self._add_minimization_vars()
positive = set(subset_k) - self._flipped
negative = set(subset_k) & self._flipped
v = self._v.set(positive)
cs = self._prob.add_linear_constraints(v >= self._epsilon)
self._temp_constr.extend(cs)
v = self._v.set(negative)
cs = self._prob.add_linear_constraints(v <= -self._epsilon)
self._temp_constr.extend(cs)
self._prob.set_objective(self._z.expr(
(rxnid, -weights.get(rxnid, 1)) for rxnid in subset_p))
self._solve() |
def object_download(self, bucket, key, start_offset=0, byte_count=None):
"""Reads the contents of an object as text.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object to be read.
start_offset: the start offset of bytes to read.
byte_count: the number of bytes to read. If None, it reads to the end.
Returns:
The text content within the object.
Raises:
Exception if the object could not be read from.
"""
args = {'alt': 'media'}
headers = {}
if start_offset > 0 or byte_count is not None:
header = 'bytes=%d-' % start_offset
if byte_count is not None:
header += '%d' % byte_count
headers['Range'] = header
url = Api._DOWNLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
return google.datalab.utils.Http.request(url, args=args, headers=headers,
credentials=self._credentials, raw_response=True) | Reads the contents of an object as text.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object to be read.
start_offset: the start offset of bytes to read.
byte_count: the number of bytes to read. If None, it reads to the end.
Returns:
The text content within the object.
Raises:
      Exception if the object could not be read from. | Below is the instruction that describes the task:
### Input:
Reads the contents of an object as text.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object to be read.
start_offset: the start offset of bytes to read.
byte_count: the number of bytes to read. If None, it reads to the end.
Returns:
The text content within the object.
Raises:
Exception if the object could not be read from.
### Response:
def object_download(self, bucket, key, start_offset=0, byte_count=None):
"""Reads the contents of an object as text.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object to be read.
start_offset: the start offset of bytes to read.
byte_count: the number of bytes to read. If None, it reads to the end.
Returns:
The text content within the object.
Raises:
Exception if the object could not be read from.
"""
args = {'alt': 'media'}
headers = {}
if start_offset > 0 or byte_count is not None:
header = 'bytes=%d-' % start_offset
if byte_count is not None:
header += '%d' % byte_count
headers['Range'] = header
url = Api._DOWNLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
return google.datalab.utils.Http.request(url, args=args, headers=headers,
credentials=self._credentials, raw_response=True) |
def data(self):
"""Data for packet creation."""
header = struct.pack('>BLB',
4, # version
self.created, # creation
self.algo_id) # public key algorithm ID
oid = util.prefix_len('>B', self.curve_info['oid'])
blob = self.curve_info['serialize'](self.verifying_key)
        return header + oid + blob + self.ecdh_packet | Data for packet creation. | Below is the instruction that describes the task:
### Input:
Data for packet creation.
### Response:
def data(self):
"""Data for packet creation."""
header = struct.pack('>BLB',
4, # version
self.created, # creation
self.algo_id) # public key algorithm ID
oid = util.prefix_len('>B', self.curve_info['oid'])
blob = self.curve_info['serialize'](self.verifying_key)
return header + oid + blob + self.ecdh_packet |
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
sqrtbz= nu.sqrt(self._b2+z**2.)
asqrtbz= self._a+sqrtbz
if isinstance(R,float) and sqrtbz == asqrtbz:
return (-z/
(R**2.+(self._a+nu.sqrt(z**2.+self._b2))**2.)**(3./2.))
else:
return (-z*asqrtbz/sqrtbz/
(R**2.+(self._a+nu.sqrt(z**2.+self._b2))**2.)**(3./2.)) | NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
           2010-07-09 - Written - Bovy (NYU) | Below is the instruction that describes the task:
### Input:
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
### Response:
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
sqrtbz= nu.sqrt(self._b2+z**2.)
asqrtbz= self._a+sqrtbz
if isinstance(R,float) and sqrtbz == asqrtbz:
return (-z/
(R**2.+(self._a+nu.sqrt(z**2.+self._b2))**2.)**(3./2.))
else:
return (-z*asqrtbz/sqrtbz/
(R**2.+(self._a+nu.sqrt(z**2.+self._b2))**2.)**(3./2.)) |
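A minimal sketch of the closed form the two branches above evaluate (any overall amplitude factored out); for a == 0 the (a + s)/s factor reduces to 1, which is the special case handled by the first branch. The sample values are arbitrary.

import numpy as np

def mn_zforce(R, z, a, b):
    # F_z(R, z) = -z * (a + sqrt(z^2 + b^2))
    #             / ( sqrt(z^2 + b^2) * (R^2 + (a + sqrt(z^2 + b^2))^2)^(3/2) )
    s = np.sqrt(b ** 2 + z ** 2)
    return -z * (a + s) / (s * (R ** 2 + (a + s) ** 2) ** 1.5)

print(mn_zforce(1.0, 0.5, a=0.5, b=0.3))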
def to_packets(pages, strict=False):
"""Construct a list of packet data from a list of Ogg pages.
If strict is true, the first page must start a new packet,
and the last page must end the last packet.
"""
serial = pages[0].serial
sequence = pages[0].sequence
packets = []
if strict:
if pages[0].continued:
raise ValueError("first packet is continued")
if not pages[-1].complete:
raise ValueError("last packet does not complete")
elif pages and pages[0].continued:
packets.append([b""])
for page in pages:
if serial != page.serial:
raise ValueError("invalid serial number in %r" % page)
elif sequence != page.sequence:
raise ValueError("bad sequence number in %r" % page)
else:
sequence += 1
if page.continued:
packets[-1].append(page.packets[0])
else:
packets.append([page.packets[0]])
packets.extend([p] for p in page.packets[1:])
return [b"".join(p) for p in packets] | Construct a list of packet data from a list of Ogg pages.
If strict is true, the first page must start a new packet,
    and the last page must end the last packet. | Below is the instruction that describes the task:
### Input:
Construct a list of packet data from a list of Ogg pages.
If strict is true, the first page must start a new packet,
and the last page must end the last packet.
### Response:
def to_packets(pages, strict=False):
"""Construct a list of packet data from a list of Ogg pages.
If strict is true, the first page must start a new packet,
and the last page must end the last packet.
"""
serial = pages[0].serial
sequence = pages[0].sequence
packets = []
if strict:
if pages[0].continued:
raise ValueError("first packet is continued")
if not pages[-1].complete:
raise ValueError("last packet does not complete")
elif pages and pages[0].continued:
packets.append([b""])
for page in pages:
if serial != page.serial:
raise ValueError("invalid serial number in %r" % page)
elif sequence != page.sequence:
raise ValueError("bad sequence number in %r" % page)
else:
sequence += 1
if page.continued:
packets[-1].append(page.packets[0])
else:
packets.append([page.packets[0]])
packets.extend([p] for p in page.packets[1:])
return [b"".join(p) for p in packets] |
def __get_host(node, vm_):
'''
Return public IP, private IP, or hostname for the libcloud 'node' object
'''
if __get_ssh_interface(vm_) == 'private_ips' or vm_['external_ip'] is None:
ip_address = node.private_ips[0]
log.info('Salt node data. Private_ip: %s', ip_address)
else:
ip_address = node.public_ips[0]
log.info('Salt node data. Public_ip: %s', ip_address)
if ip_address:
return ip_address
    return node.name | Return public IP, private IP, or hostname for the libcloud 'node' object | Below is the instruction that describes the task:
### Input:
Return public IP, private IP, or hostname for the libcloud 'node' object
### Response:
def __get_host(node, vm_):
'''
Return public IP, private IP, or hostname for the libcloud 'node' object
'''
if __get_ssh_interface(vm_) == 'private_ips' or vm_['external_ip'] is None:
ip_address = node.private_ips[0]
log.info('Salt node data. Private_ip: %s', ip_address)
else:
ip_address = node.public_ips[0]
log.info('Salt node data. Public_ip: %s', ip_address)
if ip_address:
return ip_address
return node.name |
def update_billing_info(self, billing_info):
"""Change this account's billing information to the given `BillingInfo`."""
url = urljoin(self._url, '/billing_info')
response = billing_info.http_request(url, 'PUT', billing_info,
{'Content-Type': 'application/xml; charset=utf-8'})
if response.status == 200:
pass
elif response.status == 201:
billing_info._url = response.getheader('Location')
else:
billing_info.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
        billing_info.update_from_element(ElementTree.fromstring(response_xml)) | Change this account's billing information to the given `BillingInfo`. | Below is the instruction that describes the task:
### Input:
Change this account's billing information to the given `BillingInfo`.
### Response:
def update_billing_info(self, billing_info):
"""Change this account's billing information to the given `BillingInfo`."""
url = urljoin(self._url, '/billing_info')
response = billing_info.http_request(url, 'PUT', billing_info,
{'Content-Type': 'application/xml; charset=utf-8'})
if response.status == 200:
pass
elif response.status == 201:
billing_info._url = response.getheader('Location')
else:
billing_info.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
billing_info.update_from_element(ElementTree.fromstring(response_xml)) |
def parse_date(date, default=None):
""" Parse a valid date """
if date == "":
if default is not None:
return default
else:
raise Exception("Unknown format for " + date)
for format_type in ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%d %H", "%Y-%m-%d", "%d/%m/%Y %H:%M:%S", "%d/%m/%Y %H:%M", "%d/%m/%Y %H",
"%d/%m/%Y"]:
try:
return datetime.strptime(date, format_type)
except ValueError:
pass
raise Exception("Unknown format for " + date) | Parse a valid date | Below is the the instruction that describes the task:
### Input:
Parse a valid date
### Response:
def parse_date(date, default=None):
""" Parse a valid date """
if date == "":
if default is not None:
return default
else:
raise Exception("Unknown format for " + date)
for format_type in ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%d %H", "%Y-%m-%d", "%d/%m/%Y %H:%M:%S", "%d/%m/%Y %H:%M", "%d/%m/%Y %H",
"%d/%m/%Y"]:
try:
return datetime.strptime(date, format_type)
except ValueError:
pass
raise Exception("Unknown format for " + date) |
def list_tags(self, pattern: str = None) -> typing.List[str]:
"""
Returns list of tags, optionally matching "pattern"
:param pattern: optional pattern to filter results
:type pattern: str
:return: existing tags
:rtype: list of str
"""
tags: typing.List[str] = [str(tag) for tag in self.repo.tags]
if not pattern:
LOGGER.debug('tags found in repo: %s', tags)
return tags
LOGGER.debug('filtering tags with pattern: %s', pattern)
filtered_tags: typing.List[str] = [tag for tag in tags if pattern in tag]
LOGGER.debug('filtered tags: %s', filtered_tags)
return filtered_tags | Returns list of tags, optionally matching "pattern"
:param pattern: optional pattern to filter results
:type pattern: str
:return: existing tags
        :rtype: list of str | Below is the instruction that describes the task:
### Input:
Returns list of tags, optionally matching "pattern"
:param pattern: optional pattern to filter results
:type pattern: str
:return: existing tags
:rtype: list of str
### Response:
def list_tags(self, pattern: str = None) -> typing.List[str]:
"""
Returns list of tags, optionally matching "pattern"
:param pattern: optional pattern to filter results
:type pattern: str
:return: existing tags
:rtype: list of str
"""
tags: typing.List[str] = [str(tag) for tag in self.repo.tags]
if not pattern:
LOGGER.debug('tags found in repo: %s', tags)
return tags
LOGGER.debug('filtering tags with pattern: %s', pattern)
filtered_tags: typing.List[str] = [tag for tag in tags if pattern in tag]
LOGGER.debug('filtered tags: %s', filtered_tags)
return filtered_tags |
def delete_user(self, user):
"""Delete the given user.
Args:
user (string): User name.
Raises:
requests.HTTPError on failure.
"""
self.service.delete_user(
user, self.url_prefix, self.auth, self.session, self.session_send_opts) | Delete the given user.
Args:
user (string): User name.
Raises:
            requests.HTTPError on failure. | Below is the instruction that describes the task:
### Input:
Delete the given user.
Args:
user (string): User name.
Raises:
requests.HTTPError on failure.
### Response:
def delete_user(self, user):
"""Delete the given user.
Args:
user (string): User name.
Raises:
requests.HTTPError on failure.
"""
self.service.delete_user(
user, self.url_prefix, self.auth, self.session, self.session_send_opts) |
def atleast(cls, lits, bound=1, top_id=None, encoding=EncType.seqcounter):
"""
This method can be used for creating a CNF encoding of an AtLeastK
constraint, i.e. of :math:`\sum_{i=1}^{n}{x_i}\geq k`. The method
takes 1 mandatory argument ``lits`` and 3 default arguments can be
specified: ``bound``, ``top_id``, and ``encoding``.
:param lits: a list of literals in the sum.
:param bound: the value of bound :math:`k`.
:param top_id: top variable identifier used so far.
:param encoding: identifier of the encoding to use.
:type lits: iterable(int)
:type bound: int
:type top_id: integer or None
:type encoding: integer
Parameter ``top_id`` serves to increase integer identifiers of
auxiliary variables introduced during the encoding process. This is
helpful when augmenting an existing CNF formula with the new
cardinality encoding to make sure there is no collision between
identifiers of the variables. If specified the identifiers of the
first auxiliary variable will be ``top_id+1``.
The default value of ``encoding`` is :attr:`Enctype.seqcounter`.
The method *translates* the AtLeast constraint into an AtMost
constraint by *negating* the literals of ``lits``, creating a new
bound :math:`n-k` and invoking :meth:`CardEnc.atmost` with the
modified list of literals and the new bound.
:raises CardEnc.NoSuchEncodingError: if encoding does not exist.
:rtype: a :class:`.CNFPlus` object where the new \
clauses (or the new native atmost constraint) are stored.
"""
if encoding < 0 or encoding > 9:
raise(NoSuchEncodingError(encoding))
if not top_id:
top_id = max(map(lambda x: abs(x), lits))
# we are going to return this formula
ret = CNFPlus()
# Minicard's native representation is handled separately
if encoding == 9:
ret.atmosts, ret.nv = [([-l for l in lits], len(lits) - bound)], top_id
return ret
# saving default SIGINT handler
def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL)
res = pycard.encode_atleast(lits, bound, top_id, encoding)
# recovering default SIGINT handler
def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler)
if res:
ret.clauses, ret.nv = res
return ret | This method can be used for creating a CNF encoding of an AtLeastK
constraint, i.e. of :math:`\sum_{i=1}^{n}{x_i}\geq k`. The method
takes 1 mandatory argument ``lits`` and 3 default arguments can be
specified: ``bound``, ``top_id``, and ``encoding``.
:param lits: a list of literals in the sum.
:param bound: the value of bound :math:`k`.
:param top_id: top variable identifier used so far.
:param encoding: identifier of the encoding to use.
:type lits: iterable(int)
:type bound: int
:type top_id: integer or None
:type encoding: integer
Parameter ``top_id`` serves to increase integer identifiers of
auxiliary variables introduced during the encoding process. This is
helpful when augmenting an existing CNF formula with the new
cardinality encoding to make sure there is no collision between
identifiers of the variables. If specified the identifiers of the
first auxiliary variable will be ``top_id+1``.
The default value of ``encoding`` is :attr:`Enctype.seqcounter`.
The method *translates* the AtLeast constraint into an AtMost
constraint by *negating* the literals of ``lits``, creating a new
bound :math:`n-k` and invoking :meth:`CardEnc.atmost` with the
modified list of literals and the new bound.
:raises CardEnc.NoSuchEncodingError: if encoding does not exist.
:rtype: a :class:`.CNFPlus` object where the new \
            clauses (or the new native atmost constraint) are stored. | Below is the instruction that describes the task:
### Input:
This method can be used for creating a CNF encoding of an AtLeastK
constraint, i.e. of :math:`\sum_{i=1}^{n}{x_i}\geq k`. The method
takes 1 mandatory argument ``lits`` and 3 default arguments can be
specified: ``bound``, ``top_id``, and ``encoding``.
:param lits: a list of literals in the sum.
:param bound: the value of bound :math:`k`.
:param top_id: top variable identifier used so far.
:param encoding: identifier of the encoding to use.
:type lits: iterable(int)
:type bound: int
:type top_id: integer or None
:type encoding: integer
Parameter ``top_id`` serves to increase integer identifiers of
auxiliary variables introduced during the encoding process. This is
helpful when augmenting an existing CNF formula with the new
cardinality encoding to make sure there is no collision between
identifiers of the variables. If specified the identifiers of the
first auxiliary variable will be ``top_id+1``.
The default value of ``encoding`` is :attr:`Enctype.seqcounter`.
The method *translates* the AtLeast constraint into an AtMost
constraint by *negating* the literals of ``lits``, creating a new
bound :math:`n-k` and invoking :meth:`CardEnc.atmost` with the
modified list of literals and the new bound.
:raises CardEnc.NoSuchEncodingError: if encoding does not exist.
:rtype: a :class:`.CNFPlus` object where the new \
clauses (or the new native atmost constraint) are stored.
### Response:
def atleast(cls, lits, bound=1, top_id=None, encoding=EncType.seqcounter):
"""
This method can be used for creating a CNF encoding of an AtLeastK
constraint, i.e. of :math:`\sum_{i=1}^{n}{x_i}\geq k`. The method
takes 1 mandatory argument ``lits`` and 3 default arguments can be
specified: ``bound``, ``top_id``, and ``encoding``.
:param lits: a list of literals in the sum.
:param bound: the value of bound :math:`k`.
:param top_id: top variable identifier used so far.
:param encoding: identifier of the encoding to use.
:type lits: iterable(int)
:type bound: int
:type top_id: integer or None
:type encoding: integer
Parameter ``top_id`` serves to increase integer identifiers of
auxiliary variables introduced during the encoding process. This is
helpful when augmenting an existing CNF formula with the new
cardinality encoding to make sure there is no collision between
identifiers of the variables. If specified the identifiers of the
first auxiliary variable will be ``top_id+1``.
The default value of ``encoding`` is :attr:`Enctype.seqcounter`.
The method *translates* the AtLeast constraint into an AtMost
constraint by *negating* the literals of ``lits``, creating a new
bound :math:`n-k` and invoking :meth:`CardEnc.atmost` with the
modified list of literals and the new bound.
:raises CardEnc.NoSuchEncodingError: if encoding does not exist.
:rtype: a :class:`.CNFPlus` object where the new \
clauses (or the new native atmost constraint) are stored.
"""
if encoding < 0 or encoding > 9:
raise(NoSuchEncodingError(encoding))
if not top_id:
top_id = max(map(lambda x: abs(x), lits))
# we are going to return this formula
ret = CNFPlus()
# Minicard's native representation is handled separately
if encoding == 9:
ret.atmosts, ret.nv = [([-l for l in lits], len(lits) - bound)], top_id
return ret
# saving default SIGINT handler
def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL)
res = pycard.encode_atleast(lits, bound, top_id, encoding)
# recovering default SIGINT handler
def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler)
if res:
ret.clauses, ret.nv = res
return ret |
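A hedged usage sketch, assuming the PySAT package this method belongs to is installed. It asks for "at least 2 of x1, x2, x3", which the method rewrites as an AtMost constraint over the negated literals with bound n - k = 1.

from pysat.card import CardEnc, EncType

cnf = CardEnc.atleast(lits=[1, 2, 3], bound=2, encoding=EncType.seqcounter)
print(cnf.clauses)  # clauses over the original and auxiliary variables
print(cnf.nv)       # highest variable id used, including auxiliaries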
def _get_supply_array_construct(self):
""" Returns a construct for an array of power supply data.
"""
bus_no = integer.setResultsName("bus_no")
s_rating = real.setResultsName("s_rating") # MVA
p_direction = real.setResultsName("p_direction") # CPF
p_bid_max = real.setResultsName("p_bid_max") # p.u.
p_bid_min = real.setResultsName("p_bid_min") # p.u.
p_bid_actual = real.setResultsName("p_bid_actual") # p.u.
p_fixed = real.setResultsName("p_fixed") # $/hr
p_proportional = real.setResultsName("p_proportional") # $/MWh
p_quadratic = real.setResultsName("p_quadratic") # $/MW^2h
q_fixed = real.setResultsName("q_fixed") # $/hr
q_proportional = real.setResultsName("q_proportional") # $/MVArh
q_quadratic = real.setResultsName("q_quadratic") # $/MVAr^2h
commitment = boolean.setResultsName("commitment")
cost_tie_break = real.setResultsName("cost_tie_break") # $/MWh
lp_factor = real.setResultsName("lp_factor")# Loss participation factor
q_max = real.setResultsName("q_max") # p.u.
q_min = real.setResultsName("q_min") # p.u.
cost_cong_up = real.setResultsName("cost_cong_up") # $/h
cost_cong_down = real.setResultsName("cost_cong_down") # $/h
status = Optional(boolean).setResultsName("status")
supply_data = bus_no + s_rating + p_direction + p_bid_max + \
p_bid_min + p_bid_actual + p_fixed + p_proportional + \
p_quadratic + q_fixed + q_proportional + q_quadratic + \
commitment + cost_tie_break + lp_factor + q_max + q_min + \
cost_cong_up + cost_cong_down + status + scolon
supply_data.setParseAction(self.push_supply)
supply_array = Literal("Supply.con") + "=" + "[" + "..." + \
ZeroOrMore(supply_data + Optional("]" + scolon))
        return supply_array | Returns a construct for an array of power supply data. | Below is the instruction that describes the task:
### Input:
Returns a construct for an array of power supply data.
### Response:
def _get_supply_array_construct(self):
""" Returns a construct for an array of power supply data.
"""
bus_no = integer.setResultsName("bus_no")
s_rating = real.setResultsName("s_rating") # MVA
p_direction = real.setResultsName("p_direction") # CPF
p_bid_max = real.setResultsName("p_bid_max") # p.u.
p_bid_min = real.setResultsName("p_bid_min") # p.u.
p_bid_actual = real.setResultsName("p_bid_actual") # p.u.
p_fixed = real.setResultsName("p_fixed") # $/hr
p_proportional = real.setResultsName("p_proportional") # $/MWh
p_quadratic = real.setResultsName("p_quadratic") # $/MW^2h
q_fixed = real.setResultsName("q_fixed") # $/hr
q_proportional = real.setResultsName("q_proportional") # $/MVArh
q_quadratic = real.setResultsName("q_quadratic") # $/MVAr^2h
commitment = boolean.setResultsName("commitment")
cost_tie_break = real.setResultsName("cost_tie_break") # $/MWh
lp_factor = real.setResultsName("lp_factor")# Loss participation factor
q_max = real.setResultsName("q_max") # p.u.
q_min = real.setResultsName("q_min") # p.u.
cost_cong_up = real.setResultsName("cost_cong_up") # $/h
cost_cong_down = real.setResultsName("cost_cong_down") # $/h
status = Optional(boolean).setResultsName("status")
supply_data = bus_no + s_rating + p_direction + p_bid_max + \
p_bid_min + p_bid_actual + p_fixed + p_proportional + \
p_quadratic + q_fixed + q_proportional + q_quadratic + \
commitment + cost_tie_break + lp_factor + q_max + q_min + \
cost_cong_up + cost_cong_down + status + scolon
supply_data.setParseAction(self.push_supply)
supply_array = Literal("Supply.con") + "=" + "[" + "..." + \
ZeroOrMore(supply_data + Optional("]" + scolon))
return supply_array |
def processRequest(self, request: Request, frm: str):
"""
Handle a REQUEST from the client.
If the request has already been executed, the node re-sends the reply to
the client. Otherwise, the node acknowledges the client request, adds it
to its list of client requests, and sends a PROPAGATE to the
remaining nodes.
:param request: the REQUEST from the client
:param frm: the name of the client that sent this REQUEST
"""
logger.debug("{} received client request: {} from {}".
format(self.name, request, frm))
self.nodeRequestSpikeMonitorData['accum'] += 1
# TODO: What if client sends requests with same request id quickly so
# before reply for one is generated, the other comes. In that
# case we need to keep track of what requests ids node has seen
# in-memory and once request with a particular request id is processed,
# it should be removed from that in-memory DS.
# If request is already processed(there is a reply for the
# request in
# the node's transaction store then return the reply from the
# transaction store)
# TODO: What if the reply was a REQNACK? Its not gonna be found in the
# replies.
txn_type = request.operation[TXN_TYPE]
if self.is_action(txn_type):
self.process_action(request, frm)
elif txn_type == GET_TXN:
self.handle_get_txn_req(request, frm)
self.total_read_request_number += 1
elif self.is_query(txn_type):
self.process_query(request, frm)
self.total_read_request_number += 1
elif self.can_write_txn(txn_type):
reply = self.getReplyFromLedgerForRequest(request)
if reply:
logger.debug("{} returning reply from already processed "
"REQUEST: {}".format(self, request))
self.transmitToClient(reply, frm)
return
# If the node is not already processing the request
if not self.isProcessingReq(request.key):
self.startedProcessingReq(request.key, frm)
# forced request should be processed before consensus
self.handle_request_if_forced(request)
# If not already got the propagate request(PROPAGATE) for the
# corresponding client request(REQUEST)
self.recordAndPropagate(request, frm)
self.send_ack_to_client((request.identifier, request.reqId), frm)
else:
raise InvalidClientRequest(
request.identifier,
request.reqId,
'Pool is in readonly mode, try again in 60 seconds') | Handle a REQUEST from the client.
If the request has already been executed, the node re-sends the reply to
the client. Otherwise, the node acknowledges the client request, adds it
to its list of client requests, and sends a PROPAGATE to the
remaining nodes.
:param request: the REQUEST from the client
        :param frm: the name of the client that sent this REQUEST | Below is the instruction that describes the task:
### Input:
Handle a REQUEST from the client.
If the request has already been executed, the node re-sends the reply to
the client. Otherwise, the node acknowledges the client request, adds it
to its list of client requests, and sends a PROPAGATE to the
remaining nodes.
:param request: the REQUEST from the client
:param frm: the name of the client that sent this REQUEST
### Response:
def processRequest(self, request: Request, frm: str):
"""
Handle a REQUEST from the client.
If the request has already been executed, the node re-sends the reply to
the client. Otherwise, the node acknowledges the client request, adds it
to its list of client requests, and sends a PROPAGATE to the
remaining nodes.
:param request: the REQUEST from the client
:param frm: the name of the client that sent this REQUEST
"""
logger.debug("{} received client request: {} from {}".
format(self.name, request, frm))
self.nodeRequestSpikeMonitorData['accum'] += 1
# TODO: What if client sends requests with same request id quickly so
# before reply for one is generated, the other comes. In that
# case we need to keep track of what requests ids node has seen
# in-memory and once request with a particular request id is processed,
# it should be removed from that in-memory DS.
# If request is already processed(there is a reply for the
# request in
# the node's transaction store then return the reply from the
# transaction store)
# TODO: What if the reply was a REQNACK? Its not gonna be found in the
# replies.
txn_type = request.operation[TXN_TYPE]
if self.is_action(txn_type):
self.process_action(request, frm)
elif txn_type == GET_TXN:
self.handle_get_txn_req(request, frm)
self.total_read_request_number += 1
elif self.is_query(txn_type):
self.process_query(request, frm)
self.total_read_request_number += 1
elif self.can_write_txn(txn_type):
reply = self.getReplyFromLedgerForRequest(request)
if reply:
logger.debug("{} returning reply from already processed "
"REQUEST: {}".format(self, request))
self.transmitToClient(reply, frm)
return
# If the node is not already processing the request
if not self.isProcessingReq(request.key):
self.startedProcessingReq(request.key, frm)
# forced request should be processed before consensus
self.handle_request_if_forced(request)
# If not already got the propagate request(PROPAGATE) for the
# corresponding client request(REQUEST)
self.recordAndPropagate(request, frm)
self.send_ack_to_client((request.identifier, request.reqId), frm)
else:
raise InvalidClientRequest(
request.identifier,
request.reqId,
'Pool is in readonly mode, try again in 60 seconds') |
def clean(self, text, **kwargs):
"""This is the master method that cleans all of the filth out of the
dirty dirty ``text``. All keyword arguments to this function are passed
through to the ``Filth.replace_with`` method to fine-tune how the
``Filth`` is cleaned.
"""
if sys.version_info < (3, 0):
# Only in Python 2. In 3 every string is a Python 2 unicode
if not isinstance(text, unicode):
raise exceptions.UnicodeRequired
clean_chunks = []
filth = Filth()
for next_filth in self.iter_filth(text):
clean_chunks.append(text[filth.end:next_filth.beg])
clean_chunks.append(next_filth.replace_with(**kwargs))
filth = next_filth
clean_chunks.append(text[filth.end:])
return u''.join(clean_chunks) | This is the master method that cleans all of the filth out of the
dirty dirty ``text``. All keyword arguments to this function are passed
through to the ``Filth.replace_with`` method to fine-tune how the
``Filth`` is cleaned. | Below is the the instruction that describes the task:
### Input:
This is the master method that cleans all of the filth out of the
dirty dirty ``text``. All keyword arguments to this function are passed
through to the ``Filth.replace_with`` method to fine-tune how the
``Filth`` is cleaned.
### Response:
def clean(self, text, **kwargs):
"""This is the master method that cleans all of the filth out of the
dirty dirty ``text``. All keyword arguments to this function are passed
through to the ``Filth.replace_with`` method to fine-tune how the
``Filth`` is cleaned.
"""
if sys.version_info < (3, 0):
# Only in Python 2. In 3 every string is a Python 2 unicode
if not isinstance(text, unicode):
raise exceptions.UnicodeRequired
clean_chunks = []
filth = Filth()
for next_filth in self.iter_filth(text):
clean_chunks.append(text[filth.end:next_filth.beg])
clean_chunks.append(next_filth.replace_with(**kwargs))
filth = next_filth
clean_chunks.append(text[filth.end:])
return u''.join(clean_chunks) |
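The stitching loop above is a general pattern: walk the detected spans in order, keep the clean text between them, and substitute each span. A minimal self-contained sketch of that pattern, using a toy regex finder in place of scrubadub's real filth detectors (the function name and the {{EMAIL}} placeholder are illustrative):
import re
def toy_clean(text, replace_with="{{EMAIL}}"):
    # Stand-in for iter_filth(): yield (start, end) spans of "filth".
    spans = [m.span() for m in re.finditer(r"\S+@\S+", text)]
    chunks, prev_end = [], 0
    for beg, end in spans:
        chunks.append(text[prev_end:beg])  # clean text before the span
        chunks.append(replace_with)        # replacement for the span
        prev_end = end
    chunks.append(text[prev_end:])         # trailing clean text
    return "".join(chunks)
print(toy_clean("contact me at jane@example.com today"))
# -> contact me at {{EMAIL}} today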
def netdev():
'''
.. versionchanged:: 2016.3.2
Return the network device stats for this minion
.. versionchanged:: 2016.11.4
Added support for AIX
CLI Example:
.. code-block:: bash
salt '*' status.netdev
'''
def linux_netdev():
'''
linux specific implementation of netdev
'''
ret = {}
try:
with salt.utils.files.fopen('/proc/net/dev', 'r') as fp_:
stats = salt.utils.stringutils.to_unicode(fp_.read())
except IOError:
pass
else:
for line in stats.splitlines():
if not line:
continue
if line.find(':') < 0:
continue
comps = line.split()
# Fix lines like eth0:9999..'
comps[0] = line.split(':')[0].strip()
# Support lines both like eth0:999 and eth0: 9999
comps.insert(1, line.split(':')[1].strip().split()[0])
ret[comps[0]] = {'iface': comps[0],
'rx_bytes': _number(comps[2]),
'rx_compressed': _number(comps[8]),
'rx_drop': _number(comps[5]),
'rx_errs': _number(comps[4]),
'rx_fifo': _number(comps[6]),
'rx_frame': _number(comps[7]),
'rx_multicast': _number(comps[9]),
'rx_packets': _number(comps[3]),
'tx_bytes': _number(comps[10]),
'tx_carrier': _number(comps[16]),
'tx_colls': _number(comps[15]),
'tx_compressed': _number(comps[17]),
'tx_drop': _number(comps[13]),
'tx_errs': _number(comps[12]),
'tx_fifo': _number(comps[14]),
'tx_packets': _number(comps[11])}
return ret
def freebsd_netdev():
'''
freebsd specific implementation of netdev
'''
_dict_tree = lambda: collections.defaultdict(_dict_tree)
ret = _dict_tree()
netstat = __salt__['cmd.run']('netstat -i -n -4 -b -d').splitlines()
netstat += __salt__['cmd.run']('netstat -i -n -6 -b -d').splitlines()[1:]
header = netstat[0].split()
for line in netstat[1:]:
comps = line.split()
for i in range(4, 13): # The columns we want
ret[comps[0]][comps[2]][comps[3]][header[i]] = _number(comps[i])
return ret
def sunos_netdev():
'''
sunos specific implementation of netdev
'''
ret = {}
##NOTE: we cannot use hwaddr_interfaces here, so we grab both ip4 and ip6
for dev in __grains__['ip4_interfaces'].keys() + __grains__['ip6_interfaces']:
# fetch device info
netstat_ipv4 = __salt__['cmd.run']('netstat -i -I {dev} -n -f inet'.format(dev=dev)).splitlines()
netstat_ipv6 = __salt__['cmd.run']('netstat -i -I {dev} -n -f inet6'.format(dev=dev)).splitlines()
# prepare data
netstat_ipv4[0] = netstat_ipv4[0].split()
netstat_ipv4[1] = netstat_ipv4[1].split()
netstat_ipv6[0] = netstat_ipv6[0].split()
netstat_ipv6[1] = netstat_ipv6[1].split()
# add data
ret[dev] = {}
for i in range(len(netstat_ipv4[0])-1):
if netstat_ipv4[0][i] == 'Name':
continue
if netstat_ipv4[0][i] in ['Address', 'Net/Dest']:
ret[dev]['IPv4 {field}'.format(field=netstat_ipv4[0][i])] = netstat_ipv4[1][i]
else:
ret[dev][netstat_ipv4[0][i]] = _number(netstat_ipv4[1][i])
for i in range(len(netstat_ipv6[0])-1):
if netstat_ipv6[0][i] == 'Name':
continue
if netstat_ipv6[0][i] in ['Address', 'Net/Dest']:
ret[dev]['IPv6 {field}'.format(field=netstat_ipv6[0][i])] = netstat_ipv6[1][i]
else:
ret[dev][netstat_ipv6[0][i]] = _number(netstat_ipv6[1][i])
return ret
def aix_netdev():
'''
AIX specific implementation of netdev
'''
ret = {}
fields = []
procn = None
for dev in __grains__['ip4_interfaces'].keys() + __grains__['ip6_interfaces'].keys():
# fetch device info
#root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6
#Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
#en0 1500 link#3 e2.eb.32.42.84.c 10029668 0 446490 0 0
#en0 1500 172.29.128 172.29.149.95 10029668 0 446490 0 0
#root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6
#Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
#en0 1500 link#3 e2.eb.32.42.84.c 10029731 0 446499 0 0
netstat_ipv4 = __salt__['cmd.run']('netstat -i -n -I {dev} -f inet'.format(dev=dev)).splitlines()
netstat_ipv6 = __salt__['cmd.run']('netstat -i -n -I {dev} -f inet6'.format(dev=dev)).splitlines()
# add data
ret[dev] = []
for line in netstat_ipv4:
if line.startswith('Name'):
fields = line.split()
continue
comps = line.split()
if len(comps) < 3:
raise CommandExecutionError('Insufficient data returned by command to process \'{0}\''.format(line))
if comps[2].startswith('link'):
continue
procn = len(ret[dev])
ret[dev].append({})
ret[dev][procn]['ipv4'] = {}
for i in range(1, len(fields)):
if len(comps) > i:
ret[dev][procn]['ipv4'][fields[i]] = comps[i]
for line in netstat_ipv6:
if line.startswith('Name'):
fields = line.split()
continue
comps = line.split()
if len(comps) < 3:
raise CommandExecutionError('Insufficient data returned by command to process \'{0}\''.format(line))
if comps[2].startswith('link'):
continue
procn = len(ret[dev])
ret[dev].append({})
ret[dev][procn]['ipv6'] = {}
for i in range(1, len(fields)):
if len(comps) > i:
ret[dev][procn]['ipv6'][fields[i]] = comps[i]
return ret
# dict that returns a function that does the right thing per platform
get_version = {
'Linux': linux_netdev,
'FreeBSD': freebsd_netdev,
'SunOS': sunos_netdev,
'AIX': aix_netdev,
}
errmsg = 'This method is unsupported on the current operating system!'
return get_version.get(__grains__['kernel'], lambda: errmsg)() | .. versionchanged:: 2016.3.2
Return the network device stats for this minion
.. versionchanged:: 2016.11.4
Added support for AIX
CLI Example:
.. code-block:: bash
salt '*' status.netdev | Below is the the instruction that describes the task:
### Input:
.. versionchanged:: 2016.3.2
Return the network device stats for this minion
.. versionchanged:: 2016.11.4
Added support for AIX
CLI Example:
.. code-block:: bash
salt '*' status.netdev
### Response:
def netdev():
'''
.. versionchanged:: 2016.3.2
Return the network device stats for this minion
.. versionchanged:: 2016.11.4
Added support for AIX
CLI Example:
.. code-block:: bash
salt '*' status.netdev
'''
def linux_netdev():
'''
linux specific implementation of netdev
'''
ret = {}
try:
with salt.utils.files.fopen('/proc/net/dev', 'r') as fp_:
stats = salt.utils.stringutils.to_unicode(fp_.read())
except IOError:
pass
else:
for line in stats.splitlines():
if not line:
continue
if line.find(':') < 0:
continue
comps = line.split()
# Fix lines like eth0:9999..'
comps[0] = line.split(':')[0].strip()
# Support lines both like eth0:999 and eth0: 9999
comps.insert(1, line.split(':')[1].strip().split()[0])
ret[comps[0]] = {'iface': comps[0],
'rx_bytes': _number(comps[2]),
'rx_compressed': _number(comps[8]),
'rx_drop': _number(comps[5]),
'rx_errs': _number(comps[4]),
'rx_fifo': _number(comps[6]),
'rx_frame': _number(comps[7]),
'rx_multicast': _number(comps[9]),
'rx_packets': _number(comps[3]),
'tx_bytes': _number(comps[10]),
'tx_carrier': _number(comps[16]),
'tx_colls': _number(comps[15]),
'tx_compressed': _number(comps[17]),
'tx_drop': _number(comps[13]),
'tx_errs': _number(comps[12]),
'tx_fifo': _number(comps[14]),
'tx_packets': _number(comps[11])}
return ret
def freebsd_netdev():
'''
freebsd specific implementation of netdev
'''
_dict_tree = lambda: collections.defaultdict(_dict_tree)
ret = _dict_tree()
netstat = __salt__['cmd.run']('netstat -i -n -4 -b -d').splitlines()
netstat += __salt__['cmd.run']('netstat -i -n -6 -b -d').splitlines()[1:]
header = netstat[0].split()
for line in netstat[1:]:
comps = line.split()
for i in range(4, 13): # The columns we want
ret[comps[0]][comps[2]][comps[3]][header[i]] = _number(comps[i])
return ret
def sunos_netdev():
'''
sunos specific implementation of netdev
'''
ret = {}
##NOTE: we cannot use hwaddr_interfaces here, so we grab both ip4 and ip6
for dev in __grains__['ip4_interfaces'].keys() + __grains__['ip6_interfaces']:
# fetch device info
netstat_ipv4 = __salt__['cmd.run']('netstat -i -I {dev} -n -f inet'.format(dev=dev)).splitlines()
netstat_ipv6 = __salt__['cmd.run']('netstat -i -I {dev} -n -f inet6'.format(dev=dev)).splitlines()
# prepare data
netstat_ipv4[0] = netstat_ipv4[0].split()
netstat_ipv4[1] = netstat_ipv4[1].split()
netstat_ipv6[0] = netstat_ipv6[0].split()
netstat_ipv6[1] = netstat_ipv6[1].split()
# add data
ret[dev] = {}
for i in range(len(netstat_ipv4[0])-1):
if netstat_ipv4[0][i] == 'Name':
continue
if netstat_ipv4[0][i] in ['Address', 'Net/Dest']:
ret[dev]['IPv4 {field}'.format(field=netstat_ipv4[0][i])] = netstat_ipv4[1][i]
else:
ret[dev][netstat_ipv4[0][i]] = _number(netstat_ipv4[1][i])
for i in range(len(netstat_ipv6[0])-1):
if netstat_ipv6[0][i] == 'Name':
continue
if netstat_ipv6[0][i] in ['Address', 'Net/Dest']:
ret[dev]['IPv6 {field}'.format(field=netstat_ipv6[0][i])] = netstat_ipv6[1][i]
else:
ret[dev][netstat_ipv6[0][i]] = _number(netstat_ipv6[1][i])
return ret
def aix_netdev():
'''
AIX specific implementation of netdev
'''
ret = {}
fields = []
procn = None
for dev in __grains__['ip4_interfaces'].keys() + __grains__['ip6_interfaces'].keys():
# fetch device info
#root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6
#Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
#en0 1500 link#3 e2.eb.32.42.84.c 10029668 0 446490 0 0
#en0 1500 172.29.128 172.29.149.95 10029668 0 446490 0 0
#root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6
#Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
#en0 1500 link#3 e2.eb.32.42.84.c 10029731 0 446499 0 0
netstat_ipv4 = __salt__['cmd.run']('netstat -i -n -I {dev} -f inet'.format(dev=dev)).splitlines()
netstat_ipv6 = __salt__['cmd.run']('netstat -i -n -I {dev} -f inet6'.format(dev=dev)).splitlines()
# add data
ret[dev] = []
for line in netstat_ipv4:
if line.startswith('Name'):
fields = line.split()
continue
comps = line.split()
if len(comps) < 3:
raise CommandExecutionError('Insufficient data returned by command to process \'{0}\''.format(line))
if comps[2].startswith('link'):
continue
procn = len(ret[dev])
ret[dev].append({})
ret[dev][procn]['ipv4'] = {}
for i in range(1, len(fields)):
if len(comps) > i:
ret[dev][procn]['ipv4'][fields[i]] = comps[i]
for line in netstat_ipv6:
if line.startswith('Name'):
fields = line.split()
continue
comps = line.split()
if len(comps) < 3:
raise CommandExecutionError('Insufficient data returned by command to process \'{0}\''.format(line))
if comps[2].startswith('link'):
continue
procn = len(ret[dev])
ret[dev].append({})
ret[dev][procn]['ipv6'] = {}
for i in range(1, len(fields)):
if len(comps) > i:
ret[dev][procn]['ipv6'][fields[i]] = comps[i]
return ret
# dict that returns a function that does the right thing per platform
get_version = {
'Linux': linux_netdev,
'FreeBSD': freebsd_netdev,
'SunOS': sunos_netdev,
'AIX': aix_netdev,
}
errmsg = 'This method is unsupported on the current operating system!'
return get_version.get(__grains__['kernel'], lambda: errmsg)() |
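The closing lines rely on a dispatch-table idiom: a dict maps the platform name to the matching implementation, and .get() supplies a fallback callable for unsupported platforms. A self-contained sketch of that idiom with made-up implementations (the platform names mirror the grains above, the data is fake):
def linux_impl():
    return {'eth0': {'rx_bytes': 0}}
def freebsd_impl():
    return {'em0': {'rx_bytes': 0}}
def stats_for(kernel):
    errmsg = 'This method is unsupported on the current operating system!'
    dispatch = {'Linux': linux_impl, 'FreeBSD': freebsd_impl}
    # Unknown kernels fall back to a lambda returning the error message.
    return dispatch.get(kernel, lambda: errmsg)()
print(stats_for('Linux'))   # {'eth0': {'rx_bytes': 0}}
print(stats_for('Plan9'))   # the error message string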
def get_conf_from_module(mod):
"""return configuration from module with defaults no worry about None type
"""
conf = ModuleConfig(CONF_SPEC)
# get imported module
mod = _get_correct_module(mod)
conf.set_module(mod)
# extract from default object or from module
if hasattr(mod, 'default'):
default = mod.default
conf = extract_conf_from(default, conf)
else:
conf = extract_conf_from(mod, conf)
return conf | return configuration from module with defaults no worry about None type | Below is the the instruction that describes the task:
### Input:
return configuration from module with defaults no worry about None type
### Response:
def get_conf_from_module(mod):
"""return configuration from module with defaults no worry about None type
"""
conf = ModuleConfig(CONF_SPEC)
# get imported module
mod = _get_correct_module(mod)
conf.set_module(mod)
# extract from default object or from module
if hasattr(mod, 'default'):
default = mod.default
conf = extract_conf_from(default, conf)
else:
conf = extract_conf_from(mod, conf)
return conf |
def read_from(self, provider, **options):
""" All :class:`Pointer` fields in the `Sequence` read the necessary
number of bytes from the data :class:`Provider` for their referenced
:attr:`~Pointer.data` object. Null pointers are ignored.
:param Provider provider: data :class:`Provider`.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
:attr:`~Pointer.data` objects of all :class:`Pointer` fields in the
`Sequence` read their referenced :attr:`~Pointer.data` object as
well (chained method call).
Each :class:`Pointer` field stores the bytes for its referenced
:attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`.
"""
for item in iter(self):
# Container or Pointer
if is_mixin(item):
item.read_from(provider, **options) | All :class:`Pointer` fields in the `Sequence` read the necessary
number of bytes from the data :class:`Provider` for their referenced
:attr:`~Pointer.data` object. Null pointers are ignored.
:param Provider provider: data :class:`Provider`.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
:attr:`~Pointer.data` objects of all :class:`Pointer` fields in the
`Sequence` read their referenced :attr:`~Pointer.data` object as
well (chained method call).
Each :class:`Pointer` field stores the bytes for its referenced
:attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`. | Below is the the instruction that describes the task:
### Input:
All :class:`Pointer` fields in the `Sequence` read the necessary
number of bytes from the data :class:`Provider` for their referenced
:attr:`~Pointer.data` object. Null pointers are ignored.
:param Provider provider: data :class:`Provider`.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
:attr:`~Pointer.data` objects of all :class:`Pointer` fields in the
`Sequence` read their referenced :attr:`~Pointer.data` object as
well (chained method call).
Each :class:`Pointer` field stores the bytes for its referenced
:attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`.
### Response:
def read_from(self, provider, **options):
""" All :class:`Pointer` fields in the `Sequence` read the necessary
number of bytes from the data :class:`Provider` for their referenced
:attr:`~Pointer.data` object. Null pointers are ignored.
:param Provider provider: data :class:`Provider`.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
:attr:`~Pointer.data` objects of all :class:`Pointer` fields in the
`Sequence` read their referenced :attr:`~Pointer.data` object as
well (chained method call).
Each :class:`Pointer` field stores the bytes for its referenced
:attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`.
"""
for item in iter(self):
# Container or Pointer
if is_mixin(item):
item.read_from(provider, **options) |
def get_default_url():
"""
Grab a default URL from bugzillarc [DEFAULT] url=X
"""
from bugzilla.base import _open_bugzillarc
cfg = _open_bugzillarc()
if cfg:
cfgurl = cfg.defaults().get("url", None)
if cfgurl is not None:
log.debug("bugzillarc: found cli url=%s", cfgurl)
return cfgurl
return DEFAULT_BZ | Grab a default URL from bugzillarc [DEFAULT] url=X | Below is the the instruction that describes the task:
### Input:
Grab a default URL from bugzillarc [DEFAULT] url=X
### Response:
def get_default_url():
"""
Grab a default URL from bugzillarc [DEFAULT] url=X
"""
from bugzilla.base import _open_bugzillarc
cfg = _open_bugzillarc()
if cfg:
cfgurl = cfg.defaults().get("url", None)
if cfgurl is not None:
log.debug("bugzillarc: found cli url=%s", cfgurl)
return cfgurl
return DEFAULT_BZ |
def QA_fetch_get_hkfund_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# Hong Kong market (HKMARKET)
27 5 Hong Kong index FH
31 2 Hong Kong main board KH
48 2 Hong Kong GEM board KG
49 2 Hong Kong funds KT
43 1 B-share to H-share HB
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==49') | [summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# Hong Kong market (HKMARKET)
27 5 Hong Kong index FH
31 2 Hong Kong main board KH
48 2 Hong Kong GEM board KG
49 2 Hong Kong funds KT
43 1 B-share to H-share HB | Below is the the instruction that describes the task:
### Input:
[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# Hong Kong market (HKMARKET)
27 5 Hong Kong index FH
31 2 Hong Kong main board KH
48 2 Hong Kong GEM board KG
49 2 Hong Kong funds KT
43 1 B-share to H-share HB
### Response:
def QA_fetch_get_hkfund_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# Hong Kong market (HKMARKET)
27 5 Hong Kong index FH
31 2 Hong Kong main board KH
48 2 Hong Kong GEM board KG
49 2 Hong Kong funds KT
43 1 B-share to H-share HB
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==49') |
def p_expr_new(p):
'expr : NEW class_name_reference ctor_arguments'
p[0] = ast.New(p[2], p[3], lineno=p.lineno(1)) | expr : NEW class_name_reference ctor_arguments | Below is the the instruction that describes the task:
### Input:
expr : NEW class_name_reference ctor_arguments
### Response:
def p_expr_new(p):
'expr : NEW class_name_reference ctor_arguments'
p[0] = ast.New(p[2], p[3], lineno=p.lineno(1)) |
def get_data_blob(self, rawtx):
"""TODO add docstring"""
tx = deserialize.tx(rawtx)
data = control.get_data_blob(tx)
return serialize.data(data) | TODO add docstring | Below is the the instruction that describes the task:
### Input:
TODO add docstring
### Response:
def get_data_blob(self, rawtx):
"""TODO add docstring"""
tx = deserialize.tx(rawtx)
data = control.get_data_blob(tx)
return serialize.data(data) |
def lookupGeoInfo(positions):
"""Looks up lat/lon info with goole given a list
of positions as parsed by parsePositionFile.
Returns google results in form of dicionary
"""
list_data=[]
oldlat=0
oldlon=0
d={}
for pos in positions:
# Only lookup point if it is above threshold
diff_lat=abs(float(pos['lat'])-oldlat)
diff_lon=abs(float(pos['lon'])-oldlon)
if (diff_lat>POS_THRESHOLD_DEG) or\
(diff_lon>POS_THRESHOLD_DEG):
d=lookup_by_latlon(pos['lat'],pos['lon'])
oldlat=float(pos['lat'])
oldlon=float(pos['lon'])
else:
logger.debug("Skipping %s/%s, close to prev"%(pos['lat'],pos['lon']))
# Use fresh lookup value or old value
list_data.append(d)
logger.info('looked up %d positions'%(len(list_data)))
return list_data | Looks up lat/lon info with google given a list
of positions as parsed by parsePositionFile.
Returns google results in the form of a dictionary | Below is the the instruction that describes the task:
### Input:
Looks up lat/lon info with google given a list
of positions as parsed by parsePositionFile.
Returns google results in the form of a dictionary
### Response:
def lookupGeoInfo(positions):
"""Looks up lat/lon info with goole given a list
of positions as parsed by parsePositionFile.
Returns google results in form of dicionary
"""
list_data=[]
oldlat=0
oldlon=0
d={}
for pos in positions:
# Only lookup point if it is above threshold
diff_lat=abs(float(pos['lat'])-oldlat)
diff_lon=abs(float(pos['lon'])-oldlon)
if (diff_lat>POS_THRESHOLD_DEG) or\
(diff_lon>POS_THRESHOLD_DEG):
d=lookup_by_latlon(pos['lat'],pos['lon'])
oldlat=float(pos['lat'])
oldlon=float(pos['lon'])
else:
logger.debug("Skipping %s/%s, close to prev"%(pos['lat'],pos['lon']))
# Use fresh lookup value or old value
list_data.append(d)
logger.info('looked up %d positions'%(len(list_data)))
return list_data |
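The loop only performs a fresh lookup when the point has moved more than POS_THRESHOLD_DEG in latitude or longitude since the last looked-up point, and otherwise reuses the previous result. A self-contained sketch of that throttling pattern with a fake lookup function (the threshold value and names are illustrative):
POS_THRESHOLD_DEG = 0.01  # illustrative value
def fake_lookup(lat, lon):
    return {'latlon': (lat, lon)}
def annotate(positions):
    results, last, prev_lat, prev_lon = [], {}, None, None
    for pos in positions:
        lat, lon = float(pos['lat']), float(pos['lon'])
        moved = (prev_lat is None or
                 abs(lat - prev_lat) > POS_THRESHOLD_DEG or
                 abs(lon - prev_lon) > POS_THRESHOLD_DEG)
        if moved:
            last = fake_lookup(lat, lon)  # expensive call only when we moved
            prev_lat, prev_lon = lat, lon
        results.append(last)              # fresh or reused result
    return results
print(annotate([{'lat': '52.0', 'lon': '4.0'},
                {'lat': '52.0001', 'lon': '4.0001'},  # reuses previous result
                {'lat': '52.2', 'lon': '4.2'}]))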
def DeleteGRRTempFile(path):
"""Delete a GRR temp file.
To limit possible damage the path must be absolute and either the
file must be within any of the Client.tempdir_roots or the file name
must begin with Client.tempfile_prefix.
Args:
path: path string to file to be deleted.
Raises:
OSError: Permission denied, or file not found.
ErrorBadPath: Path must be absolute.
ErrorNotTempFile: Filename must start with Client.tempfile_prefix.
ErrorNotAFile: File to delete does not exist.
"""
precondition.AssertType(path, Text)
if not os.path.isabs(path):
raise ErrorBadPath("Path must be absolute")
prefix = config.CONFIG["Client.tempfile_prefix"]
directories = [
GetTempDirForRoot(root) for root in config.CONFIG["Client.tempdir_roots"]
]
if not _CheckIfPathIsValidForDeletion(
path, prefix=prefix, directories=directories):
msg = ("Can't delete temp file %s. Filename must start with %s "
"or lie within any of %s.")
raise ErrorNotTempFile(msg % (path, prefix, ";".join(directories)))
if os.path.exists(path):
# Clear our file handle cache so the file can be deleted.
files.FILE_HANDLE_CACHE.Flush()
os.remove(path)
else:
raise ErrorNotAFile("%s does not exist." % path) | Delete a GRR temp file.
To limit possible damage the path must be absolute and either the
file must be within any of the Client.tempdir_roots or the file name
must begin with Client.tempfile_prefix.
Args:
path: path string to file to be deleted.
Raises:
OSError: Permission denied, or file not found.
ErrorBadPath: Path must be absolute.
ErrorNotTempFile: Filename must start with Client.tempfile_prefix.
ErrorNotAFile: File to delete does not exist. | Below is the the instruction that describes the task:
### Input:
Delete a GRR temp file.
To limit possible damage the path must be absolute and either the
file must be within any of the Client.tempdir_roots or the file name
must begin with Client.tempfile_prefix.
Args:
path: path string to file to be deleted.
Raises:
OSError: Permission denied, or file not found.
ErrorBadPath: Path must be absolute.
ErrorNotTempFile: Filename must start with Client.tempfile_prefix.
ErrorNotAFile: File to delete does not exist.
### Response:
def DeleteGRRTempFile(path):
"""Delete a GRR temp file.
To limit possible damage the path must be absolute and either the
file must be within any of the Client.tempdir_roots or the file name
must begin with Client.tempfile_prefix.
Args:
path: path string to file to be deleted.
Raises:
OSError: Permission denied, or file not found.
ErrorBadPath: Path must be absolute.
ErrorNotTempFile: Filename must start with Client.tempfile_prefix.
ErrorNotAFile: File to delete does not exist.
"""
precondition.AssertType(path, Text)
if not os.path.isabs(path):
raise ErrorBadPath("Path must be absolute")
prefix = config.CONFIG["Client.tempfile_prefix"]
directories = [
GetTempDirForRoot(root) for root in config.CONFIG["Client.tempdir_roots"]
]
if not _CheckIfPathIsValidForDeletion(
path, prefix=prefix, directories=directories):
msg = ("Can't delete temp file %s. Filename must start with %s "
"or lie within any of %s.")
raise ErrorNotTempFile(msg % (path, prefix, ";".join(directories)))
if os.path.exists(path):
# Clear our file handle cache so the file can be deleted.
files.FILE_HANDLE_CACHE.Flush()
os.remove(path)
else:
raise ErrorNotAFile("%s does not exist." % path) |
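The deletion guard boils down to two path checks: the path must be absolute, and it must either sit under one of the allowed temp roots or have a basename starting with the configured prefix. A self-contained sketch of that kind of check (not GRR's actual _CheckIfPathIsValidForDeletion; the paths and prefix are illustrative):
import os
def is_safe_to_delete(path, prefix, directories):
    if not os.path.isabs(path):
        return False
    path = os.path.abspath(path)
    under_allowed = any(
        path.startswith(os.path.abspath(d) + os.sep) for d in directories)
    has_prefix = os.path.basename(path).startswith(prefix)
    return under_allowed or has_prefix
print(is_safe_to_delete('/tmp/grr_scratch/file1', 'tmp', ['/tmp/grr_scratch']))  # True
print(is_safe_to_delete('relative/file', 'tmp', ['/tmp/grr_scratch']))           # False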
def connect_to_nsqd(self, host, port):
"""
Adds a connection to ``nsqd`` at the specified address.
:param host: the address to connect to
:param port: the port to connect to
"""
assert isinstance(host, string_types)
assert isinstance(port, int)
conn = AsyncConn(host, port, **self.conn_kwargs)
conn.on('identify', self._on_connection_identify)
conn.on('identify_response', self._on_connection_identify_response)
conn.on('auth', self._on_connection_auth)
conn.on('auth_response', self._on_connection_auth_response)
conn.on('error', self._on_connection_error)
conn.on('close', self._on_connection_close)
conn.on('ready', self._on_connection_ready)
conn.on('message', self._on_message)
conn.on('heartbeat', self._on_heartbeat)
conn.on('backoff', functools.partial(self._on_backoff_resume, success=False))
conn.on('resume', functools.partial(self._on_backoff_resume, success=True))
conn.on('continue', functools.partial(self._on_backoff_resume, success=None))
if conn.id in self.conns:
return
# only attempt to re-connect once every 10s per destination
# this throttles reconnects to failed endpoints
now = time.time()
last_connect_attempt = self.connection_attempts.get(conn.id)
if last_connect_attempt and last_connect_attempt > now - 10:
return
self.connection_attempts[conn.id] = now
logger.info('[%s:%s] connecting to nsqd', conn.id, self.name)
conn.connect()
return conn | Adds a connection to ``nsqd`` at the specified address.
:param host: the address to connect to
:param port: the port to connect to | Below is the the instruction that describes the task:
### Input:
Adds a connection to ``nsqd`` at the specified address.
:param host: the address to connect to
:param port: the port to connect to
### Response:
def connect_to_nsqd(self, host, port):
"""
Adds a connection to ``nsqd`` at the specified address.
:param host: the address to connect to
:param port: the port to connect to
"""
assert isinstance(host, string_types)
assert isinstance(port, int)
conn = AsyncConn(host, port, **self.conn_kwargs)
conn.on('identify', self._on_connection_identify)
conn.on('identify_response', self._on_connection_identify_response)
conn.on('auth', self._on_connection_auth)
conn.on('auth_response', self._on_connection_auth_response)
conn.on('error', self._on_connection_error)
conn.on('close', self._on_connection_close)
conn.on('ready', self._on_connection_ready)
conn.on('message', self._on_message)
conn.on('heartbeat', self._on_heartbeat)
conn.on('backoff', functools.partial(self._on_backoff_resume, success=False))
conn.on('resume', functools.partial(self._on_backoff_resume, success=True))
conn.on('continue', functools.partial(self._on_backoff_resume, success=None))
if conn.id in self.conns:
return
# only attempt to re-connect once every 10s per destination
# this throttles reconnects to failed endpoints
now = time.time()
last_connect_attempt = self.connection_attempts.get(conn.id)
if last_connect_attempt and last_connect_attempt > now - 10:
return
self.connection_attempts[conn.id] = now
logger.info('[%s:%s] connecting to nsqd', conn.id, self.name)
conn.connect()
return conn |
def delete_change_set(awsclient, change_set_name, stack_name):
"""Delete specified change set. Currently we only use this during
automated regression testing. But we have plans so lets locate this
functionality here
:param awsclient:
:param change_set_name:
:param stack_name:
"""
client = awsclient.get_client('cloudformation')
response = client.delete_change_set(
ChangeSetName=change_set_name,
StackName=stack_name) | Delete specified change set. Currently we only use this during
automated regression testing. But we have plans so lets locate this
functionality here
:param awsclient:
:param change_set_name:
:param stack_name: | Below is the the instruction that describes the task:
### Input:
Delete specified change set. Currently we only use this during
automated regression testing. But we have plans so lets locate this
functionality here
:param awsclient:
:param change_set_name:
:param stack_name:
### Response:
def delete_change_set(awsclient, change_set_name, stack_name):
"""Delete specified change set. Currently we only use this during
automated regression testing. But we have plans so lets locate this
functionality here
:param awsclient:
:param change_set_name:
:param stack_name:
"""
client = awsclient.get_client('cloudformation')
response = client.delete_change_set(
ChangeSetName=change_set_name,
StackName=stack_name) |
def IsDerivedFunction(clean_lines, linenum):
"""Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
if match:
# Look for "override" after the matching closing parenthesis
line, _, closing_paren = CloseExpression(
clean_lines, i, len(match.group(1)))
return (closing_paren >= 0 and
Search(r'\boverride\b', line[closing_paren:]))
return False | Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier. | Below is the the instruction that describes the task:
### Input:
Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
### Response:
def IsDerivedFunction(clean_lines, linenum):
"""Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
if match:
# Look for "override" after the matching closing parenthesis
line, _, closing_paren = CloseExpression(
clean_lines, i, len(match.group(1)))
return (closing_paren >= 0 and
Search(r'\boverride\b', line[closing_paren:]))
return False |
def down_ec2(instance_id, region, access_key_id, secret_access_key):
""" shutdown of an existing EC2 instance """
conn = connect_to_ec2(region, access_key_id, secret_access_key)
# get the instance_id from the state file, and stop the instance
instance = conn.stop_instances(instance_ids=instance_id)[0]
while instance.state != "stopped":
log_yellow("Instance state: %s" % instance.state)
sleep(10)
instance.update()
log_green('Instance state: %s' % instance.state) | shutdown of an existing EC2 instance | Below is the the instruction that describes the task:
### Input:
shutdown of an existing EC2 instance
### Response:
def down_ec2(instance_id, region, access_key_id, secret_access_key):
""" shutdown of an existing EC2 instance """
conn = connect_to_ec2(region, access_key_id, secret_access_key)
# get the instance_id from the state file, and stop the instance
instance = conn.stop_instances(instance_ids=instance_id)[0]
while instance.state != "stopped":
log_yellow("Instance state: %s" % instance.state)
sleep(10)
instance.update()
log_green('Instance state: %s' % instance.state) |
def cartoon(args):
"""
%prog synteny.py
Generate cartoon illustration of SynFind.
"""
p = OptionParser(cartoon.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="10x7")
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
# Panel A
A = CartoonRegion(41)
A.draw(root, .35, .85, strip=False, color=False)
x1, x2 = A.x1, A.x2
lsg = "lightslategray"
pad = .01
xc, yc = .35, .88
arrowlen = x2 - xc - pad
arrowprops = dict(length_includes_head=True, width=.01, fc=lsg, lw=0,
head_length=arrowlen * .15, head_width=.03)
p = FancyArrow(xc - pad, yc, -arrowlen, 0, shape="left", **arrowprops)
root.add_patch(p)
p = FancyArrow(xc + pad, yc, arrowlen, 0, shape="right", **arrowprops)
root.add_patch(p)
yt = yc + 4 * pad
root.text((x1 + xc) / 2, yt, "20 genes upstream", ha="center")
root.text((x2 + xc) / 2, yt, "20 genes downstream", ha="center")
root.plot((xc,), (yc,), "o", mfc='w', mec=lsg, mew=2, lw=2, color=lsg)
root.text(xc, yt, "Query gene", ha="center")
# Panel B
A.draw(root, .35, .7, strip=False)
RoundRect(root, (.07, .49), .56, .14, fc='y', alpha=.2)
a = deepcopy(A)
a.evolve(mode='S', target=10)
a.draw(root, .35, .6)
b = deepcopy(A)
b.evolve(mode='F', target=8)
b.draw(root, .35, .56)
c = deepcopy(A)
c.evolve(mode='G', target=6)
c.draw(root, .35, .52)
for x in (a, b, c):
root.text(.64, x.y, "Score={0}".format(x.nonwhites), va="center")
# Panel C
A.truncate_between_flankers()
a.truncate_between_flankers()
b.truncate_between_flankers()
c.truncate_between_flankers(target=6)
plot_diagram(root, .14, .2, A, a, "S", "syntenic")
plot_diagram(root, .37, .2, A, b, "F", "missing, with both flankers")
plot_diagram(root, .6, .2, A, c, "G", "missing, with one flanker")
labels = ((.04, .95, 'A'), (.04, .75, 'B'), (.04, .4, 'C'))
panel_labels(root, labels)
# Descriptions
xt = .85
desc = ("Extract neighborhood",
"of *window* size",
"Count gene pairs within *window*",
"Find regions above *score* cutoff",
"Identify flankers",
"Annotate syntelog class"
)
for yt, t in zip((.88, .84, .64, .6, .3, .26), desc):
root.text(xt, yt, markup(t), ha="center", va="center")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "cartoon"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | %prog synteny.py
Generate cartoon illustration of SynFind. | Below is the the instruction that describes the task:
### Input:
%prog synteny.py
Generate cartoon illustration of SynFind.
### Response:
def cartoon(args):
"""
%prog synteny.py
Generate cartoon illustration of SynFind.
"""
p = OptionParser(cartoon.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="10x7")
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
# Panel A
A = CartoonRegion(41)
A.draw(root, .35, .85, strip=False, color=False)
x1, x2 = A.x1, A.x2
lsg = "lightslategray"
pad = .01
xc, yc = .35, .88
arrowlen = x2 - xc - pad
arrowprops = dict(length_includes_head=True, width=.01, fc=lsg, lw=0,
head_length=arrowlen * .15, head_width=.03)
p = FancyArrow(xc - pad, yc, -arrowlen, 0, shape="left", **arrowprops)
root.add_patch(p)
p = FancyArrow(xc + pad, yc, arrowlen, 0, shape="right", **arrowprops)
root.add_patch(p)
yt = yc + 4 * pad
root.text((x1 + xc) / 2, yt, "20 genes upstream", ha="center")
root.text((x2 + xc) / 2, yt, "20 genes downstream", ha="center")
root.plot((xc,), (yc,), "o", mfc='w', mec=lsg, mew=2, lw=2, color=lsg)
root.text(xc, yt, "Query gene", ha="center")
# Panel B
A.draw(root, .35, .7, strip=False)
RoundRect(root, (.07, .49), .56, .14, fc='y', alpha=.2)
a = deepcopy(A)
a.evolve(mode='S', target=10)
a.draw(root, .35, .6)
b = deepcopy(A)
b.evolve(mode='F', target=8)
b.draw(root, .35, .56)
c = deepcopy(A)
c.evolve(mode='G', target=6)
c.draw(root, .35, .52)
for x in (a, b, c):
root.text(.64, x.y, "Score={0}".format(x.nonwhites), va="center")
# Panel C
A.truncate_between_flankers()
a.truncate_between_flankers()
b.truncate_between_flankers()
c.truncate_between_flankers(target=6)
plot_diagram(root, .14, .2, A, a, "S", "syntenic")
plot_diagram(root, .37, .2, A, b, "F", "missing, with both flankers")
plot_diagram(root, .6, .2, A, c, "G", "missing, with one flanker")
labels = ((.04, .95, 'A'), (.04, .75, 'B'), (.04, .4, 'C'))
panel_labels(root, labels)
# Descriptions
xt = .85
desc = ("Extract neighborhood",
"of *window* size",
"Count gene pairs within *window*",
"Find regions above *score* cutoff",
"Identify flankers",
"Annotate syntelog class"
)
for yt, t in zip((.88, .84, .64, .6, .3, .26), desc):
root.text(xt, yt, markup(t), ha="center", va="center")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "cartoon"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) |
def getRaw(self, instance, **kwargs):
"""Returns raw field value (possible wrapped in BaseUnit)
"""
value = ObjectField.get(self, instance, **kwargs)
# getattr(instance, "Remarks") returns a BaseUnit
if callable(value):
value = value()
return value | Returns raw field value (possibly wrapped in BaseUnit) | Below is the the instruction that describes the task:
### Input:
Returns raw field value (possibly wrapped in BaseUnit)
### Response:
def getRaw(self, instance, **kwargs):
"""Returns raw field value (possible wrapped in BaseUnit)
"""
value = ObjectField.get(self, instance, **kwargs)
# getattr(instance, "Remarks") returns a BaseUnit
if callable(value):
value = value()
return value |
def _try_coerce_args(self, values, other):
"""
Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, base-type other
"""
values = values.view('i8')
if isinstance(other, bool):
raise TypeError
elif is_null_datetimelike(other):
other = tslibs.iNaT
elif isinstance(other, (datetime, np.datetime64, date)):
other = self._box_func(other)
if getattr(other, 'tz') is not None:
raise TypeError("cannot coerce a Timestamp with a tz on a "
"naive Block")
other = other.asm8.view('i8')
elif hasattr(other, 'dtype') and is_datetime64_dtype(other):
other = other.astype('i8', copy=False).view('i8')
else:
# coercion issues
# let higher levels handle
raise TypeError(other)
return values, other | Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, base-type other | Below is the the instruction that describes the task:
### Input:
Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, base-type other
### Response:
def _try_coerce_args(self, values, other):
"""
Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, base-type other
"""
values = values.view('i8')
if isinstance(other, bool):
raise TypeError
elif is_null_datetimelike(other):
other = tslibs.iNaT
elif isinstance(other, (datetime, np.datetime64, date)):
other = self._box_func(other)
if getattr(other, 'tz') is not None:
raise TypeError("cannot coerce a Timestamp with a tz on a "
"naive Block")
other = other.asm8.view('i8')
elif hasattr(other, 'dtype') and is_datetime64_dtype(other):
other = other.astype('i8', copy=False).view('i8')
else:
# coercion issues
# let higher levels handle
raise TypeError(other)
return values, other |
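The docstring's 'i8' round-trip rests on a NumPy fact: datetime64 data can be reinterpreted as int64 nanosecond counts, with NaT mapping to the smallest int64 (tslibs.iNaT). A short NumPy-only illustration of that view:
import numpy as np
arr = np.array(['2020-01-01', 'NaT'], dtype='datetime64[ns]')
print(arr.view('i8'))
# [ 1577836800000000000 -9223372036854775808]  <- NaT is the minimum int64
print(np.iinfo(np.int64).min)
# -9223372036854775808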
def log_results(self, output_path=None, run_id=None):
"""Saves files for the minimization.
Notes
-----
Currently saves a logfile with best individual and a pdb of
the best model.
"""
best_ind = self.halloffame[0]
model_params = self.parse_individual(
best_ind) # need to change name of 'params'
if output_path is None:
output_path = os.getcwd()
if run_id is None:
run_id = '{:%Y%m%d-%H%M%S}'.format(
datetime.datetime.now())
with open('{0}/{1}_opt_log.txt'.format(
output_path, run_id), 'w') as log_file:
log_file.write('\nEvaluated {0} models in total\n'.format(
self._model_count))
log_file.write('Run ID is {0}\n'.format(run_id))
log_file.write('Best fitness is {0}\n'.format(
self.halloffame[0].fitness))
log_file.write(
'Parameters of best model are {0}\n'.format(model_params))
log_file.write(
'Best individual is {0}\n'.format(self.halloffame[0]))
for i, entry in enumerate(self.halloffame[0]):
if entry > 0.95:
log_file.write(
"Warning! Parameter {0} is at or near maximum allowed "
"value\n".format(i + 1))
elif entry < -0.95:
log_file.write(
"Warning! Parameter {0} is at or near minimum allowed "
"value\n".format(i + 1))
log_file.write('Minimization history: \n{0}'.format(self.logbook))
with open('{0}/{1}_opt_best_model.pdb'.format(
output_path, run_id), 'w') as output_file:
output_file.write(self.best_model.pdb)
return | Saves files for the minimization.
Notes
-----
Currently saves a logfile with best individual and a pdb of
the best model. | Below is the the instruction that describes the task:
### Input:
Saves files for the minimization.
Notes
-----
Currently saves a logfile with best individual and a pdb of
the best model.
### Response:
def log_results(self, output_path=None, run_id=None):
"""Saves files for the minimization.
Notes
-----
Currently saves a logfile with best individual and a pdb of
the best model.
"""
best_ind = self.halloffame[0]
model_params = self.parse_individual(
best_ind) # need to change name of 'params'
if output_path is None:
output_path = os.getcwd()
if run_id is None:
run_id = '{:%Y%m%d-%H%M%S}'.format(
datetime.datetime.now())
with open('{0}/{1}_opt_log.txt'.format(
output_path, run_id), 'w') as log_file:
log_file.write('\nEvaluated {0} models in total\n'.format(
self._model_count))
log_file.write('Run ID is {0}\n'.format(run_id))
log_file.write('Best fitness is {0}\n'.format(
self.halloffame[0].fitness))
log_file.write(
'Parameters of best model are {0}\n'.format(model_params))
log_file.write(
'Best individual is {0}\n'.format(self.halloffame[0]))
for i, entry in enumerate(self.halloffame[0]):
if entry > 0.95:
log_file.write(
"Warning! Parameter {0} is at or near maximum allowed "
"value\n".format(i + 1))
elif entry < -0.95:
log_file.write(
"Warning! Parameter {0} is at or near minimum allowed "
"value\n".format(i + 1))
log_file.write('Minimization history: \n{0}'.format(self.logbook))
with open('{0}/{1}_opt_best_model.pdb'.format(
output_path, run_id), 'w') as output_file:
output_file.write(self.best_model.pdb)
return |
def hessian_component(self, index1, index2):
"""Compute the hessian of the energy for one atom pair"""
result = np.zeros((3, 3), float)
if index1 == index2:
for index3 in range(self.numc):
if self.scaling[index1, index3] > 0:
d_1 = 1/self.distances[index1, index3]
for (se, ve), (sg, vg), (sh, vh) in zip(
self.yield_pair_energies(index1, index3),
self.yield_pair_gradients(index1, index3),
self.yield_pair_hessians(index1, index3)
):
result += (
+sh*self.dirouters[index1, index3]*ve
+sg*(np.identity(3, float) - self.dirouters[index1, index3])*ve*d_1
+sg*np.outer(self.directions[index1, index3], vg)
+sg*np.outer(vg, self.directions[index1, index3])
+se*vh
)*self.scaling[index1, index3]
elif self.scaling[index1, index2] > 0:
d_1 = 1/self.distances[index1, index2]
for (se, ve), (sg, vg), (sh, vh) in zip(
self.yield_pair_energies(index1, index2),
self.yield_pair_gradients(index1, index2),
self.yield_pair_hessians(index1, index2)
):
result -= (
+sh*self.dirouters[index1, index2]*ve
+sg*(np.identity(3, float) - self.dirouters[index1, index2])*ve*d_1
+sg*np.outer(self.directions[index1, index2], vg)
+sg*np.outer(vg, self.directions[index1, index2])
+se*vh
)*self.scaling[index1, index2]
return result | Compute the hessian of the energy for one atom pair | Below is the the instruction that describes the task:
### Input:
Compute the hessian of the energy for one atom pair
### Response:
def hessian_component(self, index1, index2):
"""Compute the hessian of the energy for one atom pair"""
result = np.zeros((3, 3), float)
if index1 == index2:
for index3 in range(self.numc):
if self.scaling[index1, index3] > 0:
d_1 = 1/self.distances[index1, index3]
for (se, ve), (sg, vg), (sh, vh) in zip(
self.yield_pair_energies(index1, index3),
self.yield_pair_gradients(index1, index3),
self.yield_pair_hessians(index1, index3)
):
result += (
+sh*self.dirouters[index1, index3]*ve
+sg*(np.identity(3, float) - self.dirouters[index1, index3])*ve*d_1
+sg*np.outer(self.directions[index1, index3], vg)
+sg*np.outer(vg, self.directions[index1, index3])
+se*vh
)*self.scaling[index1, index3]
elif self.scaling[index1, index2] > 0:
d_1 = 1/self.distances[index1, index2]
for (se, ve), (sg, vg), (sh, vh) in zip(
self.yield_pair_energies(index1, index2),
self.yield_pair_gradients(index1, index2),
self.yield_pair_hessians(index1, index2)
):
result -= (
+sh*self.dirouters[index1, index2]*ve
+sg*(np.identity(3, float) - self.dirouters[index1, index2])*ve*d_1
+sg*np.outer(self.directions[index1, index2], vg)
+sg*np.outer(vg, self.directions[index1, index2])
+se*vh
)*self.scaling[index1, index2]
return result |
def copy(self):
"""
m.copy() -- Return a 'deep' copy of the motif
"""
a = Motif()
a.__dict__ = self.__dict__.copy()
return a | m.copy() -- Return a 'deep' copy of the motif | Below is the the instruction that describes the task:
### Input:
m.copy() -- Return a 'deep' copy of the motif
### Response:
def copy(self):
"""
m.copy() -- Return a 'deep' copy of the motif
"""
a = Motif()
a.__dict__ = self.__dict__.copy()
return a |
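A short sketch of what copying __dict__ gives you: the new object gets its own attribute dictionary, but mutable attribute values are still shared, which is presumably why 'deep' is quoted above. Illustrated with a minimal stand-in class rather than the real Motif:
class Thing(object):
    def __init__(self):
        self.name = 'a'
        self.items = [1, 2]
    def copy(self):
        other = Thing()
        other.__dict__ = self.__dict__.copy()  # copies the dict, not its values
        return other
t = Thing()
c = t.copy()
c.name = 'b'           # rebinding an attribute does not affect the original
t.items.append(3)      # ...but nested mutables stay shared
print(t.name, c.name)      # a b
print(t.items, c.items)    # [1, 2, 3] [1, 2, 3]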
async def fastStreamedQuery(self, url, *, headers=None, verify=True):
""" Send a GET request with short timeout, do not retry, and return streamed response. """
response = await self.session.get(url,
headers=self._buildHeaders(headers),
timeout=HTTP_SHORT_TIMEOUT,
ssl=verify)
response.raise_for_status()
return response | Send a GET request with short timeout, do not retry, and return streamed response. | Below is the the instruction that describes the task:
### Input:
Send a GET request with short timeout, do not retry, and return streamed response.
### Response:
async def fastStreamedQuery(self, url, *, headers=None, verify=True):
""" Send a GET request with short timeout, do not retry, and return streamed response. """
response = await self.session.get(url,
headers=self._buildHeaders(headers),
timeout=HTTP_SHORT_TIMEOUT,
ssl=verify)
response.raise_for_status()
return response |
def solver(AA, N_max, symNx = 2, throw_out_modes=False):
""" Constructs the matrix A and the vector b from a timeseries of toy
action-angles AA to solve for the vector x = (J_0,J_1,J_2,S...) where
x contains all Fourier components of the generating function with |n|<N_max """
# Find all integer component n_vectors which lie within sphere of radius N_max
# Here we have assumed that the potential is symmetric x->-x, y->-y, z->-z
# This can be relaxed by changing symN to 1
# Additionally due to time reversal symmetry S_n = -S_-n so we only consider
# "half" of the n-vector-space
angs = unroll_angles(AA.T[3:].T,np.ones(3))
symNz = 2
NNx = range(-N_max, N_max+1, symNx)
NNy = range(-N_max, N_max+1, symNz)
NNz = range(-N_max, N_max+1, symNz)
n_vectors = np.array([[i,j,k] for (i,j,k) in product(NNx,NNy,NNz)
if(not(i==0 and j==0 and k==0) # exclude zero vector
and (k>0 # northern hemisphere
or (k==0 and j>0) # half of x-y plane
or (k==0 and j==0 and i>0)) # half of x axis
and np.sqrt(i*i+j*j+k*k)<=N_max)]) # inside sphere
xxx = check_each_direction(n_vectors,angs)
if(throw_out_modes):
n_vectors = np.delete(n_vectors,check_each_direction(n_vectors,angs),axis=0)
n = len(n_vectors)+3
b = np.zeros(shape=(n, ))
a = np.zeros(shape=(n,n))
a[:3,:3]=len(AA)*np.identity(3)
for i in AA:
a[:3,3:]+=2.*n_vectors.T[:3]*np.cos(np.dot(n_vectors,i[3:]))
a[3:,3:]+=4.*np.dot(n_vectors,n_vectors.T)*np.outer(np.cos(np.dot(n_vectors,i[3:])),np.cos(np.dot(n_vectors,i[3:])))
b[:3]+=i[:3]
b[3:]+=2.*np.dot(n_vectors,i[:3])*np.cos(np.dot(n_vectors,i[3:]))
a[3:,:3]=a[:3,3:].T
return np.array(solve(a,b)), n_vectors | Constructs the matrix A and the vector b from a timeseries of toy
action-angles AA to solve for the vector x = (J_0,J_1,J_2,S...) where
x contains all Fourier components of the generating function with |n|<N_max | Below is the the instruction that describes the task:
### Input:
Constructs the matrix A and the vector b from a timeseries of toy
action-angles AA to solve for the vector x = (J_0,J_1,J_2,S...) where
x contains all Fourier components of the generating function with |n|<N_max
### Response:
def solver(AA, N_max, symNx = 2, throw_out_modes=False):
""" Constructs the matrix A and the vector b from a timeseries of toy
action-angles AA to solve for the vector x = (J_0,J_1,J_2,S...) where
x contains all Fourier components of the generating function with |n|<N_max """
# Find all integer component n_vectors which lie within sphere of radius N_max
# Here we have assumed that the potential is symmetric x->-x, y->-y, z->-z
# This can be relaxed by changing symN to 1
# Additionally due to time reversal symmetry S_n = -S_-n so we only consider
# "half" of the n-vector-space
angs = unroll_angles(AA.T[3:].T,np.ones(3))
symNz = 2
NNx = range(-N_max, N_max+1, symNx)
NNy = range(-N_max, N_max+1, symNz)
NNz = range(-N_max, N_max+1, symNz)
n_vectors = np.array([[i,j,k] for (i,j,k) in product(NNx,NNy,NNz)
if(not(i==0 and j==0 and k==0) # exclude zero vector
and (k>0 # northern hemisphere
or (k==0 and j>0) # half of x-y plane
or (k==0 and j==0 and i>0)) # half of x axis
and np.sqrt(i*i+j*j+k*k)<=N_max)]) # inside sphere
xxx = check_each_direction(n_vectors,angs)
if(throw_out_modes):
n_vectors = np.delete(n_vectors,check_each_direction(n_vectors,angs),axis=0)
n = len(n_vectors)+3
b = np.zeros(shape=(n, ))
a = np.zeros(shape=(n,n))
a[:3,:3]=len(AA)*np.identity(3)
for i in AA:
a[:3,3:]+=2.*n_vectors.T[:3]*np.cos(np.dot(n_vectors,i[3:]))
a[3:,3:]+=4.*np.dot(n_vectors,n_vectors.T)*np.outer(np.cos(np.dot(n_vectors,i[3:])),np.cos(np.dot(n_vectors,i[3:])))
b[:3]+=i[:3]
b[3:]+=2.*np.dot(n_vectors,i[:3])*np.cos(np.dot(n_vectors,i[3:]))
a[3:,:3]=a[:3,3:].T
return np.array(solve(a,b)), n_vectors |
def val_factory(val, datatypes):
"""
return an instance of `val` that is of type `datatype`.
keep track of exceptions so we can produce meaningful error messages.
"""
exceptions = []
for dt in datatypes:
try:
if isinstance(val, dt):
return val
return type_handler_object(val, dt)
except Exception as e:
exceptions.append(str(e))
# if we get here, we never found a valid value. raise an error
raise ValueError('val_factory: Unable to instantiate {val} from types {types}. Exceptions: {excs}'.
format(val=val, types=datatypes, excs=exceptions)) | return an instance of `val` that is of type `datatype`.
keep track of exceptions so we can produce meaningful error messages. | Below is the the instruction that describes the task:
### Input:
return an instance of `val` that is of type `datatype`.
keep track of exceptions so we can produce meaningful error messages.
### Response:
def val_factory(val, datatypes):
"""
return an instance of `val` that is of type `datatype`.
keep track of exceptions so we can produce meaningful error messages.
"""
exceptions = []
for dt in datatypes:
try:
if isinstance(val, dt):
return val
return type_handler_object(val, dt)
except Exception as e:
exceptions.append(str(e))
# if we get here, we never found a valid value. raise an error
raise ValueError('val_factory: Unable to instantiate {val} from types {types}. Exceptions: {excs}'.
format(val=val, types=datatypes, excs=exceptions)) |
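A self-contained sketch of the same try-each-type loop. type_handler_object is not shown here, so the sketch simply calls each candidate type directly, and the error message format is illustrative:
def coerce(val, datatypes):
    errors = []
    for dt in datatypes:
        try:
            if isinstance(val, dt):
                return val
            return dt(val)            # stand-in for type_handler_object(val, dt)
        except Exception as exc:
            errors.append(str(exc))   # remember why this datatype failed
    raise ValueError('could not instantiate {!r} from {}: {}'.format(
        val, datatypes, errors))
print(coerce('3', (int, float)))    # 3
print(coerce('3.5', (int, float)))  # 3.5  (int fails, float succeeds)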
def _getMemoryBit(cpu, bitbase, bitoffset):
""" Calculate address and bit offset given a base address and a bit offset
relative to that address (in the form of asm operands) """
assert bitbase.type == 'memory'
assert bitbase.size >= bitoffset.size
addr = bitbase.address()
offt = Operators.SEXTEND(bitoffset.read(), bitoffset.size, bitbase.size)
offt_is_neg = offt >= (1 << (bitbase.size - 1))
offt_in_bytes = offt // 8
bitpos = offt % 8
new_addr = addr + Operators.ITEBV(bitbase.size, offt_is_neg, -offt_in_bytes, offt_in_bytes)
return (new_addr, bitpos) | Calculate address and bit offset given a base address and a bit offset
relative to that address (in the form of asm operands) | Below is the the instruction that describes the task:
### Input:
Calculate address and bit offset given a base address and a bit offset
relative to that address (in the form of asm operands)
### Response:
def _getMemoryBit(cpu, bitbase, bitoffset):
""" Calculate address and bit offset given a base address and a bit offset
relative to that address (in the form of asm operands) """
assert bitbase.type == 'memory'
assert bitbase.size >= bitoffset.size
addr = bitbase.address()
offt = Operators.SEXTEND(bitoffset.read(), bitoffset.size, bitbase.size)
offt_is_neg = offt >= (1 << (bitbase.size - 1))
offt_in_bytes = offt // 8
bitpos = offt % 8
new_addr = addr + Operators.ITEBV(bitbase.size, offt_is_neg, -offt_in_bytes, offt_in_bytes)
return (new_addr, bitpos) |
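Stripped of the symbolic-execution wrappers, the address math is just splitting a signed bit offset into a whole-byte displacement plus a bit position. A pure-Python illustration using divmod, whose floor semantics handle negative offsets in one step (this differs in mechanics, though not in intent, from the ITEBV form above):
def split_bit_offset(base_addr, bit_offset):
    byte_off, bitpos = divmod(bit_offset, 8)  # floor division: works for negatives
    return base_addr + byte_off, bitpos
print(split_bit_offset(0x1000, 13))   # (4097, 5) -> one byte past base, bit 5
print(split_bit_offset(0x1000, -3))   # (4095, 5) -> one byte before base, bit 5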
def detect_volume_shadow_copies(self):
"""Method to call vshadowmount and mount NTFS volume shadow copies.
:return: iterable with the :class:`Volume` objects of the VSS
:raises CommandNotFoundError: if the underlying command does not exist
:raises SubSystemError: if the underlying command fails
:raises NoMountpointAvailableError: if there is no mountpoint available
"""
self._make_mountpoint(var_name='vss', suffix="vss", in_paths=True)
try:
_util.check_call_(["vshadowmount", "-o", str(self.offset), self.get_raw_path(), self._paths['vss']])
except Exception as e:
logger.exception("Failed mounting the volume shadow copies.")
raise SubsystemError(e)
else:
return self.volumes.detect_volumes(vstype='vss') | Method to call vshadowmount and mount NTFS volume shadow copies.
:return: iterable with the :class:`Volume` objects of the VSS
:raises CommandNotFoundError: if the underlying command does not exist
:raises SubSystemError: if the underlying command fails
:raises NoMountpointAvailableError: if there is no mountpoint available | Below is the the instruction that describes the task:
### Input:
Method to call vshadowmount and mount NTFS volume shadow copies.
:return: iterable with the :class:`Volume` objects of the VSS
:raises CommandNotFoundError: if the underlying command does not exist
:raises SubSystemError: if the underlying command fails
:raises NoMountpointAvailableError: if there is no mountpoint available
### Response:
def detect_volume_shadow_copies(self):
"""Method to call vshadowmount and mount NTFS volume shadow copies.
:return: iterable with the :class:`Volume` objects of the VSS
:raises CommandNotFoundError: if the underlying command does not exist
:raises SubSystemError: if the underlying command fails
:raises NoMountpointAvailableError: if there is no mountpoint available
"""
self._make_mountpoint(var_name='vss', suffix="vss", in_paths=True)
try:
_util.check_call_(["vshadowmount", "-o", str(self.offset), self.get_raw_path(), self._paths['vss']])
except Exception as e:
logger.exception("Failed mounting the volume shadow copies.")
raise SubsystemError(e)
else:
return self.volumes.detect_volumes(vstype='vss') |
def format(self, vertices):
"""Format instance to dump
vertices is dict of name to Vertex
"""
index = ' '.join(str(vertices[vn].index) for vn in self.vnames)
vcom = ' '.join(self.vnames) # for comment
return 'hex ({0:s}) {2:s} ({1[0]:d} {1[1]:d} {1[2]:d}) '\
'{4:s} // {2:s} ({3:s})'.format(
index, self.cells, self.name, vcom, self.grading.format()) | Format instance to dump
vertices is dict of name to Vertex | Below is the the instruction that describes the task:
### Input:
Format instance to dump
vertices is dict of name to Vertex
### Response:
def format(self, vertices):
"""Format instance to dump
vertices is dict of name to Vertex
"""
index = ' '.join(str(vertices[vn].index) for vn in self.vnames)
vcom = ' '.join(self.vnames) # for comment
return 'hex ({0:s}) {2:s} ({1[0]:d} {1[1]:d} {1[2]:d}) '\
'{4:s} // {2:s} ({3:s})'.format(
index, self.cells, self.name, vcom, self.grading.format()) |
def get_context_data(self, **kwargs):
""" Returns the context data to provide to the template. """
context = super().get_context_data(**kwargs)
post = self.object
topic = post.topic
# Handles the case when a poll is associated to the topic.
try:
if hasattr(topic, 'poll') and topic.poll.options.exists():
poll = topic.poll
context['poll'] = poll
context['poll_options'] = poll.options.all()
except ObjectDoesNotExist: # pragma: no cover
pass
if not post.is_topic_head:
# Add the topic review
previous_posts = (
topic.posts
.filter(approved=True, created__lte=post.created)
.select_related('poster', 'updated_by')
.prefetch_related('attachments', 'poster__forum_profile')
.order_by('-created')
)
previous_posts = previous_posts[:machina_settings.TOPIC_REVIEW_POSTS_NUMBER]
context['previous_posts'] = previous_posts
return context | Returns the context data to provide to the template. | Below is the the instruction that describes the task:
### Input:
Returns the context data to provide to the template.
### Response:
def get_context_data(self, **kwargs):
""" Returns the context data to provide to the template. """
context = super().get_context_data(**kwargs)
post = self.object
topic = post.topic
# Handles the case when a poll is associated to the topic.
try:
if hasattr(topic, 'poll') and topic.poll.options.exists():
poll = topic.poll
context['poll'] = poll
context['poll_options'] = poll.options.all()
except ObjectDoesNotExist: # pragma: no cover
pass
if not post.is_topic_head:
# Add the topic review
previous_posts = (
topic.posts
.filter(approved=True, created__lte=post.created)
.select_related('poster', 'updated_by')
.prefetch_related('attachments', 'poster__forum_profile')
.order_by('-created')
)
previous_posts = previous_posts[:machina_settings.TOPIC_REVIEW_POSTS_NUMBER]
context['previous_posts'] = previous_posts
return context |
def get_mac_dot_app_dir(directory):
"""Returns parent directory of mac .app
Args:
directory (str): Current directory
Returns:
(str): Parent directory of mac .app
"""
return os.path.dirname(os.path.dirname(os.path.dirname(directory))) | Returns parent directory of mac .app
Args:
directory (str): Current directory
Returns:
(str): Parent directory of mac .app | Below is the the instruction that describes the task:
### Input:
Returns parent directory of mac .app
Args:
directory (str): Current directory
Returns:
(str): Parent directory of mac .app
### Response:
def get_mac_dot_app_dir(directory):
"""Returns parent directory of mac .app
Args:
directory (str): Current directory
Returns:
(str): Parent directory of mac .app
"""
return os.path.dirname(os.path.dirname(os.path.dirname(directory))) |
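A quick illustration of the triple `dirname` (the path is a placeholder):
get_mac_dot_app_dir('/Applications/Foo.app/Contents/MacOS')
# -> '/Applications', i.e. the folder that contains the .app bundle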
def checkCorpNums(self, MemberCorpNum, CorpNumList):
""" Bulk check of business closure status, up to 1,000 records
args
MemberCorpNum : Popbill member business registration number
CorpNumList : array of business registration numbers to look up
return
closure status info Object as List
raise
PopbillException
"""
if CorpNumList == None or len(CorpNumList) < 1:
raise PopbillException(-99999999,"The list of business registration numbers to look up was not provided.")
postData = self._stringtify(CorpNumList)
return self._httppost('/CloseDown',postData,MemberCorpNum) | Bulk check of business closure status, up to 1,000 records
args
MemberCorpNum : Popbill member business registration number
CorpNumList : array of business registration numbers to look up
return
closure status info Object as List
raise
PopbillException | Below is the the instruction that describes the task:
### Input:
Bulk check of business closure status, up to 1,000 records
args
MemberCorpNum : Popbill member business registration number
CorpNumList : array of business registration numbers to look up
return
closure status info Object as List
raise
PopbillException
### Response:
def checkCorpNums(self, MemberCorpNum, CorpNumList):
""" Bulk check of business closure status, up to 1,000 records
args
MemberCorpNum : Popbill member business registration number
CorpNumList : array of business registration numbers to look up
return
closure status info Object as List
raise
PopbillException
"""
if CorpNumList == None or len(CorpNumList) < 1:
raise PopbillException(-99999999,"The list of business registration numbers to look up was not provided.")
postData = self._stringtify(CorpNumList)
return self._httppost('/CloseDown',postData,MemberCorpNum)
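A hedged usage sketch; the service object, the registration numbers, and the attribute names on the returned objects follow common Popbill SDK conventions and are assumptions, not taken from this entry:
# Hypothetical: closedownService is an already-configured Popbill CloseDown service.
try:
    infos = closedownService.checkCorpNums("1234567890", ["4108600477", "1234567890"])
    for info in infos:
        print(info.corpNum, info.state)  # attribute names assumed
except PopbillException as pe:
    print(pe.code, pe.message)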
def SLH_to_qutip(slh, full_space=None, time_symbol=None,
convert_as='pyfunc'):
"""Generate and return QuTiP representation matrices for the Hamiltonian
and the collapse operators. Any inhomogeneities in the Lindblad operators
(resulting from coherent drives) will be moved into the Hamiltonian, cf.
:func:`~qnet.algebra.circuit_algebra.move_drive_to_H`.
Args:
slh (SLH): The SLH object from which to generate the qutip data
full_space (HilbertSpace or None): The Hilbert space in which to
represent the operators. If None, the space of `shl` will be used
time_symbol (:class:`sympy.Symbol` or None): The symbol (if any)
expressing time dependence (usually 't')
convert_as (str): How to express time dependencies to qutip. Must be
'pyfunc' or 'str'
Returns:
tuple ``(H, [L1, L2, ...])`` as numerical `qutip.Qobj` representations,
where ``H`` and each ``L`` may be a nested list to express time
dependence, e.g. ``H = [H0, [H1, eps_t]]``, where ``H0`` and
``H1`` are of type `qutip.Qobj`, and ``eps_t`` is either a string
(``convert_as='str'``) or a function (``convert_as='pyfunc'``)
Raises:
AlgebraError: If the Hilbert space (`slh.space` or `full_space`) is
invalid for numerical conversion
"""
if full_space:
if not full_space >= slh.space:
raise AlgebraError("full_space="+str(full_space)+" needs to "
"at least include slh.space = "+str(slh.space))
else:
full_space = slh.space
if full_space == TrivialSpace:
raise AlgebraError(
"Cannot convert SLH object in TrivialSpace. "
"You may pass a non-trivial `full_space`")
slh = move_drive_to_H(slh)
if time_symbol is None:
H = convert_to_qutip(slh.H, full_space=full_space)
Ls = []
for L in slh.Ls:
if is_scalar(L):
L = L * IdentityOperator
L_qutip = convert_to_qutip(L, full_space=full_space)
if L_qutip.norm('max') > 0:
Ls.append(L_qutip)
else:
H = _time_dependent_to_qutip(slh.H, full_space, time_symbol,
convert_as)
Ls = []
for L in slh.Ls:
if is_scalar(L):
L = L * IdentityOperator
L_qutip = _time_dependent_to_qutip(L, full_space, time_symbol,
convert_as)
Ls.append(L_qutip)
return H, Ls | Generate and return QuTiP representation matrices for the Hamiltonian
and the collapse operators. Any inhomogeneities in the Lindblad operators
(resulting from coherent drives) will be moved into the Hamiltonian, cf.
:func:`~qnet.algebra.circuit_algebra.move_drive_to_H`.
Args:
slh (SLH): The SLH object from which to generate the qutip data
full_space (HilbertSpace or None): The Hilbert space in which to
represent the operators. If None, the space of `shl` will be used
time_symbol (:class:`sympy.Symbol` or None): The symbol (if any)
expressing time dependence (usually 't')
convert_as (str): How to express time dependencies to qutip. Must be
'pyfunc' or 'str'
Returns:
tuple ``(H, [L1, L2, ...])`` as numerical `qutip.Qobj` representations,
where ``H`` and each ``L`` may be a nested list to express time
dependence, e.g. ``H = [H0, [H1, eps_t]]``, where ``H0`` and
``H1`` are of type `qutip.Qobj`, and ``eps_t`` is either a string
(``convert_as='str'``) or a function (``convert_as='pyfunc'``)
Raises:
AlgebraError: If the Hilbert space (`slh.space` or `full_space`) is
invalid for numerical conversion | Below is the the instruction that describes the task:
### Input:
Generate and return QuTiP representation matrices for the Hamiltonian
and the collapse operators. Any inhomogeneities in the Lindblad operators
(resulting from coherent drives) will be moved into the Hamiltonian, cf.
:func:`~qnet.algebra.circuit_algebra.move_drive_to_H`.
Args:
slh (SLH): The SLH object from which to generate the qutip data
full_space (HilbertSpace or None): The Hilbert space in which to
represent the operators. If None, the space of `shl` will be used
time_symbol (:class:`sympy.Symbol` or None): The symbol (if any)
expressing time dependence (usually 't')
convert_as (str): How to express time dependencies to qutip. Must be
'pyfunc' or 'str'
Returns:
tuple ``(H, [L1, L2, ...])`` as numerical `qutip.Qobj` representations,
where ``H`` and each ``L`` may be a nested list to express time
dependence, e.g. ``H = [H0, [H1, eps_t]]``, where ``H0`` and
``H1`` are of type `qutip.Qobj`, and ``eps_t`` is either a string
(``convert_as='str'``) or a function (``convert_as='pyfunc'``)
Raises:
AlgebraError: If the Hilbert space (`slh.space` or `full_space`) is
invalid for numerical conversion
### Response:
def SLH_to_qutip(slh, full_space=None, time_symbol=None,
convert_as='pyfunc'):
"""Generate and return QuTiP representation matrices for the Hamiltonian
and the collapse operators. Any inhomogeneities in the Lindblad operators
(resulting from coherent drives) will be moved into the Hamiltonian, cf.
:func:`~qnet.algebra.circuit_algebra.move_drive_to_H`.
Args:
slh (SLH): The SLH object from which to generate the qutip data
full_space (HilbertSpace or None): The Hilbert space in which to
represent the operators. If None, the space of `shl` will be used
time_symbol (:class:`sympy.Symbol` or None): The symbol (if any)
expressing time dependence (usually 't')
convert_as (str): How to express time dependencies to qutip. Must be
'pyfunc' or 'str'
Returns:
tuple ``(H, [L1, L2, ...])`` as numerical `qutip.Qobj` representations,
where ``H`` and each ``L`` may be a nested list to express time
dependence, e.g. ``H = [H0, [H1, eps_t]]``, where ``H0`` and
``H1`` are of type `qutip.Qobj`, and ``eps_t`` is either a string
(``convert_as='str'``) or a function (``convert_as='pyfunc'``)
Raises:
AlgebraError: If the Hilbert space (`slh.space` or `full_space`) is
invalid for numerical conversion
"""
if full_space:
if not full_space >= slh.space:
raise AlgebraError("full_space="+str(full_space)+" needs to "
"at least include slh.space = "+str(slh.space))
else:
full_space = slh.space
if full_space == TrivialSpace:
raise AlgebraError(
"Cannot convert SLH object in TrivialSpace. "
"You may pass a non-trivial `full_space`")
slh = move_drive_to_H(slh)
if time_symbol is None:
H = convert_to_qutip(slh.H, full_space=full_space)
Ls = []
for L in slh.Ls:
if is_scalar(L):
L = L * IdentityOperator
L_qutip = convert_to_qutip(L, full_space=full_space)
if L_qutip.norm('max') > 0:
Ls.append(L_qutip)
else:
H = _time_dependent_to_qutip(slh.H, full_space, time_symbol,
convert_as)
Ls = []
for L in slh.Ls:
if is_scalar(L):
L = L * IdentityOperator
L_qutip = _time_dependent_to_qutip(L, full_space, time_symbol,
convert_as)
Ls.append(L_qutip)
return H, Ls |
def register(self, name, option):
"""Register a new option with the namespace.
Args:
name (str): The name to register the option under.
option (option.Option): The option object to register.
Raises:
TypeError: If the option is not an option.Option object.
ValueError: If the name is already registered.
"""
if name in self._options:
raise ValueError("Option {0} already exists.".format(name))
if not isinstance(option, opt.Option):
raise TypeError("Options must be of type Option.")
self._options[name] = option | Register a new option with the namespace.
Args:
name (str): The name to register the option under.
option (option.Option): The option object to register.
Raises:
TypeError: If the option is not an option.Option object.
ValueError: If the name is already registered. | Below is the the instruction that describes the task:
### Input:
Register a new option with the namespace.
Args:
name (str): The name to register the option under.
option (option.Option): The option object to register.
Raises:
TypeError: If the option is not an option.Option object.
ValueError: If the name is already registered.
### Response:
def register(self, name, option):
"""Register a new option with the namespace.
Args:
name (str): The name to register the option under.
option (option.Option): The option object to register.
Raises:
TypeError: If the option is not an option.Option object.
ValueError: If the name is already registered.
"""
if name in self._options:
raise ValueError("Option {0} already exists.".format(name))
if not isinstance(option, opt.Option):
raise TypeError("Options must be of type Option.")
self._options[name] = option |
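A minimal sketch of the registration guardrails, assuming the owning class is called `Namespace` and that `opt.Option()` can be constructed without arguments (both are assumptions):
ns = Namespace()
ns.register('timeout', opt.Option())
try:
    ns.register('timeout', opt.Option())  # duplicate name
except ValueError as err:
    print(err)  # Option timeout already exists.
try:
    ns.register('retries', 'not-an-option')  # wrong type
except TypeError as err:
    print(err)  # Options must be of type Option.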
def get_content(self, offset, size):
"""Return the specified number of bytes from the current section."""
return _bfd.section_get_content(self.bfd, self._ptr, offset, size) | Return the specified number of bytes from the current section. | Below is the the instruction that describes the task:
### Input:
Return the specified number of bytes from the current section.
### Response:
def get_content(self, offset, size):
"""Return the specified number of bytes from the current section."""
return _bfd.section_get_content(self.bfd, self._ptr, offset, size) |
def load_chkpt_vars(model_path):
""" Load all variables from a checkpoint to a dict.
Args:
model_path(str): path to a checkpoint.
Returns:
dict: a name:value dict
"""
model_path = get_checkpoint_path(model_path)
reader = tfv1.train.NewCheckpointReader(model_path)
var_names = reader.get_variable_to_shape_map().keys()
result = {}
for n in var_names:
result[n] = reader.get_tensor(n)
return result | Load all variables from a checkpoint to a dict.
Args:
model_path(str): path to a checkpoint.
Returns:
dict: a name:value dict | Below is the the instruction that describes the task:
### Input:
Load all variables from a checkpoint to a dict.
Args:
model_path(str): path to a checkpoint.
Returns:
dict: a name:value dict
### Response:
def load_chkpt_vars(model_path):
""" Load all variables from a checkpoint to a dict.
Args:
model_path(str): path to a checkpoint.
Returns:
dict: a name:value dict
"""
model_path = get_checkpoint_path(model_path)
reader = tfv1.train.NewCheckpointReader(model_path)
var_names = reader.get_variable_to_shape_map().keys()
result = {}
for n in var_names:
result[n] = reader.get_tensor(n)
return result |
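A short usage sketch (the checkpoint path is a placeholder):
chkpt_vars = load_chkpt_vars('/path/to/train_log/model-10000')
for name, value in chkpt_vars.items():
    print(name, value.shape)  # each value is a numpy array read from the checkpoint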
def get_resource_object_doc_lines() -> List[str]:
"""Generate documentation lines for all collected resource objects.
As API documentation is generated we keep a running list of objects used
in request parameters and responses. This section will generate
documentation for each object and provide an inline reference in the API
documentation.
:returns: A list of lines required to generate the documentation.
"""
# First loop through all resources and make sure to add any properties that
# are objects and not already in `ALL_RESOURCES`. We iterate over a copy
# since we will be modifying the dict during the loop.
for resource_name, a_type in ALL_RESOURCES.copy().items():
for prop_a_type in a_type.properties.values():
if issubclass(prop_a_type, Object):
resource_name = prop_a_type.title
if resource_name is None:
class_name = prop_a_type.__name__
resource_name = class_name_to_resource_name(class_name)
ALL_RESOURCES[resource_name] = prop_a_type
elif (issubclass(prop_a_type, Array) and
prop_a_type.items is not None and
not isinstance(prop_a_type.items, list) and
issubclass(prop_a_type.items, Object)):
# This means the type is an array of objects, so we want to
# collect the object as a resource we can document later.
resource_name = prop_a_type.items.title
if resource_name is None:
class_name = prop_a_type.items.__name__
resource_name = class_name_to_resource_name(class_name)
ALL_RESOURCES[resource_name] = prop_a_type.items
# If we don't have any resources to document, just return.
if not ALL_RESOURCES:
return []
lines = ['Resource Objects', '----------------']
for resource_name in sorted(ALL_RESOURCES.keys()):
a_type = ALL_RESOURCES[resource_name]
# First add a reference to the resource
resource_ref = '_resource-{}'.format(
'-'.join(resource_name.lower().split(' ')))
lines.extend(['.. {}:'.format(resource_ref), ''])
# Add resource name heading
lines.extend([resource_name, '#' * len(resource_name)])
# Add resource description
lines.extend([a_type.description, ''])
# Only document attributes if it has properties defined.
if a_type.properties:
# Add attributes documentation.
lines.extend(['Attributes', '**********'])
for prop in a_type.properties:
prop_a_type = a_type.properties[prop]
description = a_type.properties[prop].description.strip()
# Add any object reference if the property is an object or
# an array of objects.
obj_ref = ''
if issubclass(prop_a_type, Object):
obj_ref = get_object_reference(prop_a_type)
elif (issubclass(prop_a_type, Array) and
prop_a_type.items is not None and
not isinstance(prop_a_type.items, list) and
issubclass(prop_a_type.items, Object)):
# This means the type is an array of objects.
obj_ref = get_object_reference(prop_a_type.items)
elif (issubclass(prop_a_type, Array) and
prop_a_type.items is not None):
description += get_array_items_description(prop_a_type)
native_type = a_type.properties[prop].native_type.__name__
if prop in a_type.required:
description = '**Required**. ' + description
lines.append('* **{}** (*{}*) - {}{}'.format(
prop, native_type, description, obj_ref).strip())
lines.append('')
# Add example of object.
lines.extend(['Example', '*******'])
example = a_type.get_example()
pretty_json = json.dumps(example, separators=(',', ': '), indent=4,
sort_keys=True)
pretty_json_lines = prefix_lines(pretty_json, ' ')
lines.extend(['.. code-block:: json', ''])
lines.extend(pretty_json_lines)
return lines | Generate documentation lines for all collected resource objects.
As API documentation is generated we keep a running list of objects used
in request parameters and responses. This section will generate
documentation for each object and provide an inline reference in the API
documentation.
:returns: A list of lines required to generate the documentation. | Below is the the instruction that describes the task:
### Input:
Generate documentation lines for all collected resource objects.
As API documentation is generated we keep a running list of objects used
in request parameters and responses. This section will generate
documentation for each object and provide an inline reference in the API
documentation.
:returns: A list of lines required to generate the documentation.
### Response:
def get_resource_object_doc_lines() -> List[str]:
"""Generate documentation lines for all collected resource objects.
As API documentation is generated we keep a running list of objects used
in request parameters and responses. This section will generate
documentation for each object and provide an inline reference in the API
documentation.
:returns: A list of lines required to generate the documentation.
"""
# First loop through all resources and make sure to add any properties that
# are objects and not already in `ALL_RESOURCES`. We iterate over a copy
# since we will be modifying the dict during the loop.
for resource_name, a_type in ALL_RESOURCES.copy().items():
for prop_a_type in a_type.properties.values():
if issubclass(prop_a_type, Object):
resource_name = prop_a_type.title
if resource_name is None:
class_name = prop_a_type.__name__
resource_name = class_name_to_resource_name(class_name)
ALL_RESOURCES[resource_name] = prop_a_type
elif (issubclass(prop_a_type, Array) and
prop_a_type.items is not None and
not isinstance(prop_a_type.items, list) and
issubclass(prop_a_type.items, Object)):
# This means the type is an array of objects, so we want to
# collect the object as a resource we can document later.
resource_name = prop_a_type.items.title
if resource_name is None:
class_name = prop_a_type.items.__name__
resource_name = class_name_to_resource_name(class_name)
ALL_RESOURCES[resource_name] = prop_a_type.items
# If we don't have any resources to document, just return.
if not ALL_RESOURCES:
return []
lines = ['Resource Objects', '----------------']
for resource_name in sorted(ALL_RESOURCES.keys()):
a_type = ALL_RESOURCES[resource_name]
# First add a reference to the resource
resource_ref = '_resource-{}'.format(
'-'.join(resource_name.lower().split(' ')))
lines.extend(['.. {}:'.format(resource_ref), ''])
# Add resource name heading
lines.extend([resource_name, '#' * len(resource_name)])
# Add resource description
lines.extend([a_type.description, ''])
# Only document attributes if it has properties defined.
if a_type.properties:
# Add attributes documentation.
lines.extend(['Attributes', '**********'])
for prop in a_type.properties:
prop_a_type = a_type.properties[prop]
description = a_type.properties[prop].description.strip()
# Add any object reference if the property is an object or
# an array of objects.
obj_ref = ''
if issubclass(prop_a_type, Object):
obj_ref = get_object_reference(prop_a_type)
elif (issubclass(prop_a_type, Array) and
prop_a_type.items is not None and
not isinstance(prop_a_type.items, list) and
issubclass(prop_a_type.items, Object)):
# This means the type is an array of objects.
obj_ref = get_object_reference(prop_a_type.items)
elif (issubclass(prop_a_type, Array) and
prop_a_type.items is not None):
description += get_array_items_description(prop_a_type)
native_type = a_type.properties[prop].native_type.__name__
if prop in a_type.required:
description = '**Required**. ' + description
lines.append('* **{}** (*{}*) - {}{}'.format(
prop, native_type, description, obj_ref).strip())
lines.append('')
# Add example of object.
lines.extend(['Example', '*******'])
example = a_type.get_example()
pretty_json = json.dumps(example, separators=(',', ': '), indent=4,
sort_keys=True)
pretty_json_lines = prefix_lines(pretty_json, ' ')
lines.extend(['.. code-block:: json', ''])
lines.extend(pretty_json_lines)
return lines |
def store_file(self, folder, name):
"""Stores the uploaded file in the given path."""
path = os.path.join(folder, name)
length = self.headers['content-length']
with open(path, 'wb') as sample:
sample.write(self.rfile.read(int(length)))
return path | Stores the uploaded file in the given path. | Below is the the instruction that describes the task:
### Input:
Stores the uploaded file in the given path.
### Response:
def store_file(self, folder, name):
"""Stores the uploaded file in the given path."""
path = os.path.join(folder, name)
length = self.headers['content-length']
with open(path, 'wb') as sample:
sample.write(self.rfile.read(int(length)))
return path |
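A hedged sketch of calling this helper from a `do_POST` hook, assuming `store_file` is defined on the same handler class; the upload folder and file name are placeholders:
from http.server import BaseHTTPRequestHandler

class UploadHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        saved_path = self.store_file('/tmp/uploads', 'sample.bin')  # placeholder folder/name
        self.send_response(200)
        self.end_headers()
        self.wfile.write(saved_path.encode())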
def for_model(self, model):
"""
QuerySet for all comments for a particular model (either an instance or
a class).
"""
ct = ContentType.objects.get_for_model(model)
qs = self.get_queryset().filter(content_type=ct)
if isinstance(model, models.Model):
qs = qs.filter(object_pk=force_text(model._get_pk_val()))
return qs | QuerySet for all comments for a particular model (either an instance or
a class). | Below is the the instruction that describes the task:
### Input:
QuerySet for all comments for a particular model (either an instance or
a class).
### Response:
def for_model(self, model):
"""
QuerySet for all comments for a particular model (either an instance or
a class).
"""
ct = ContentType.objects.get_for_model(model)
qs = self.get_queryset().filter(content_type=ct)
if isinstance(model, models.Model):
qs = qs.filter(object_pk=force_text(model._get_pk_val()))
return qs |
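Typical Django usage, assuming this manager is attached to a `Comment` model and that an `Article` model exists in the project (both names are illustrative):
Comment.objects.for_model(Article)          # comments on any Article
article = Article.objects.get(pk=42)
Comment.objects.for_model(article)          # comments on this specific article only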
def create(self, name=None, **kwargs):
"""Create a new project.
:param name: The name of the project.
:returns: An instance of the newly created project.
:rtype: renku.models.projects.Project
"""
data = self._client.api.create_project({'name': name})
return self.Meta.model(data, client=self._client, collection=self) | Create a new project.
:param name: The name of the project.
:returns: An instance of the newly created project.
:rtype: renku.models.projects.Project | Below is the the instruction that describes the task:
### Input:
Create a new project.
:param name: The name of the project.
:returns: An instance of the newly created project.
:rtype: renku.models.projects.Project
### Response:
def create(self, name=None, **kwargs):
"""Create a new project.
:param name: The name of the project.
:returns: An instance of the newly created project.
:rtype: renku.models.projects.Project
"""
data = self._client.api.create_project({'name': name})
return self.Meta.model(data, client=self._client, collection=self) |
def get_counter(self, transport, bucket, key, r=None, pr=None,
basic_quorum=None, notfound_ok=None):
"""get_counter(bucket, key, r=None, pr=None, basic_quorum=None,\
notfound_ok=None)
Gets the value of a counter.
.. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are
deprecated in favor of the :class:`~riak.datatypes.Counter`
datatype.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket of the counter
:type bucket: RiakBucket
:param key: the key of the counter
:type key: string
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:rtype: integer
"""
return transport.get_counter(bucket, key, r=r, pr=pr) | get_counter(bucket, key, r=None, pr=None, basic_quorum=None,\
notfound_ok=None)
Gets the value of a counter.
.. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are
deprecated in favor of the :class:`~riak.datatypes.Counter`
datatype.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket of the counter
:type bucket: RiakBucket
:param key: the key of the counter
:type key: string
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:rtype: integer | Below is the the instruction that describes the task:
### Input:
get_counter(bucket, key, r=None, pr=None, basic_quorum=None,\
notfound_ok=None)
Gets the value of a counter.
.. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are
deprecated in favor of the :class:`~riak.datatypes.Counter`
datatype.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket of the counter
:type bucket: RiakBucket
:param key: the key of the counter
:type key: string
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:rtype: integer
### Response:
def get_counter(self, transport, bucket, key, r=None, pr=None,
basic_quorum=None, notfound_ok=None):
"""get_counter(bucket, key, r=None, pr=None, basic_quorum=None,\
notfound_ok=None)
Gets the value of a counter.
.. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are
deprecated in favor of the :class:`~riak.datatypes.Counter`
datatype.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket of the counter
:type bucket: RiakBucket
:param key: the key of the counter
:type key: string
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:rtype: integer
"""
return transport.get_counter(bucket, key, r=r, pr=pr) |
def visible_width(string):
"""Get the visible width of a unicode string.
Some CJK unicode characters are more than one byte unlike ASCII and latin unicode characters.
From: https://github.com/Robpol86/terminaltables/pull/9
:param str string: String to measure.
:return: String's width.
:rtype: int
"""
if '\033' in string:
string = RE_COLOR_ANSI.sub('', string)
# Convert to unicode.
try:
string = string.decode('u8')
except (AttributeError, UnicodeEncodeError):
pass
width = 0
for char in string:
if unicodedata.east_asian_width(char) in ('F', 'W'):
width += 2
else:
width += 1
return width | Get the visible width of a unicode string.
Some CJK unicode characters are more than one byte unlike ASCII and latin unicode characters.
From: https://github.com/Robpol86/terminaltables/pull/9
:param str string: String to measure.
:return: String's width.
:rtype: int | Below is the the instruction that describes the task:
### Input:
Get the visible width of a unicode string.
Some CJK unicode characters are more than one byte unlike ASCII and latin unicode characters.
From: https://github.com/Robpol86/terminaltables/pull/9
:param str string: String to measure.
:return: String's width.
:rtype: int
### Response:
def visible_width(string):
"""Get the visible width of a unicode string.
Some CJK unicode characters are more than one byte unlike ASCII and latin unicode characters.
From: https://github.com/Robpol86/terminaltables/pull/9
:param str string: String to measure.
:return: String's width.
:rtype: int
"""
if '\033' in string:
string = RE_COLOR_ANSI.sub('', string)
# Convert to unicode.
try:
string = string.decode('u8')
except (AttributeError, UnicodeEncodeError):
pass
width = 0
for char in string:
if unicodedata.east_asian_width(char) in ('F', 'W'):
width += 2
else:
width += 1
return width |
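For example, full-width CJK characters count as two columns while ANSI color escapes (assuming the module-level RE_COLOR_ANSI pattern matches them) count as zero:
visible_width('hello')                 # -> 5
visible_width('\033[31mhello\033[0m')  # -> 5, the color codes are stripped first
visible_width('こんにちは')             # -> 10, each full-width character counts as 2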
def _fix_sitk_bug(path, metadata):
"""There is a bug in simple ITK for Z axis in 3D images. This is a fix.
:param path: path to dicom file to read
:param metadata: metadata to correct
:return: corrected metadata
"""
ds = dicom.read_file(path)
try:
metadata["voxelsize_mm"][0] = ds.SpacingBetweenSlices
except Exception as e:
logger.warning("Read dicom 'SpacingBetweenSlices' failed: ", e)
return metadata | There is a bug in simple ITK for Z axis in 3D images. This is a fix.
:param path: path to dicom file to read
:param metadata: metadata to correct
:return: corrected metadata | Below is the the instruction that describes the task:
### Input:
There is a bug in simple ITK for Z axis in 3D images. This is a fix.
:param path: path to dicom file to read
:param metadata: metadata to correct
:return: corrected metadata
### Response:
def _fix_sitk_bug(path, metadata):
"""There is a bug in simple ITK for Z axis in 3D images. This is a fix.
:param path: path to dicom file to read
:param metadata: metadata to correct
:return: corrected metadata
"""
ds = dicom.read_file(path)
try:
metadata["voxelsize_mm"][0] = ds.SpacingBetweenSlices
except Exception as e:
logger.warning("Read dicom 'SpacingBetweenSlices' failed: ", e)
return metadata |
def check_md5(filename, stored_md5):
"""
Computes the md5 of filename and checks if it matches the supplied
string md5
Input
-----
filename : string
Path to a file.
md5 : string
Known md5 of filename to check against.
"""
computed_md5 = _get_file_md5(filename)
if stored_md5 != computed_md5:
print ("MD5 checksum of filename", filename,
"failed. Expected MD5 was", stored_md5,
"but computed MD5 was", computed_md5, '\n',
"Please check if the data has been downloaded correctly or if the upstream data has changed.") | Computes the md5 of filename and check if it matches with the supplied
string md5
Input
-----
filename : string
Path to a file.
md5 : string
Known md5 of filename to check against. | Below is the the instruction that describes the task:
### Input:
Computes the md5 of filename and checks if it matches the supplied
string md5
Input
-----
filename : string
Path to a file.
md5 : string
Known md5 of filename to check against.
### Response:
def check_md5(filename, stored_md5):
"""
Computes the md5 of filename and checks if it matches the supplied
string md5
Input
-----
filename : string
Path to a file.
md5 : string
Known md5 of filename to check against.
"""
computed_md5 = _get_file_md5(filename)
if stored_md5 != computed_md5:
print ("MD5 checksum of filename", filename,
"failed. Expected MD5 was", stored_md5,
"but computed MD5 was", computed_md5, '\n',
"Please check if the data has been downloaded correctly or if the upstream data has changed.") |
def db_get(name, **connection_args):
'''
Return a list of databases of a MySQL server using the output
from the ``SELECT DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME FROM
INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME='dbname';`` query.
CLI Example:
.. code-block:: bash
salt '*' mysql.db_get test
'''
dbc = _connect(**connection_args)
if dbc is None:
return []
cur = dbc.cursor()
qry = ('SELECT DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME FROM '
'INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME=%(dbname)s;')
args = {"dbname": name}
_execute(cur, qry, args)
if cur.rowcount:
rows = cur.fetchall()
return {'character_set': rows[0][0],
'collate': rows[0][1]}
return {} | Return a list of databases of a MySQL server using the output
from the ``SELECT DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME FROM
INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME='dbname';`` query.
CLI Example:
.. code-block:: bash
salt '*' mysql.db_get test | Below is the the instruction that describes the task:
### Input:
Return a list of databases of a MySQL server using the output
from the ``SELECT DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME FROM
INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME='dbname';`` query.
CLI Example:
.. code-block:: bash
salt '*' mysql.db_get test
### Response:
def db_get(name, **connection_args):
'''
Return a list of databases of a MySQL server using the output
from the ``SELECT DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME FROM
INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME='dbname';`` query.
CLI Example:
.. code-block:: bash
salt '*' mysql.db_get test
'''
dbc = _connect(**connection_args)
if dbc is None:
return []
cur = dbc.cursor()
qry = ('SELECT DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME FROM '
'INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME=%(dbname)s;')
args = {"dbname": name}
_execute(cur, qry, args)
if cur.rowcount:
rows = cur.fetchall()
return {'character_set': rows[0][0],
'collate': rows[0][1]}
return {} |
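When called through Salt's loader from another execution module, the return value is a small dict; a rough sketch (the character-set values are illustrative):
info = __salt__['mysql.db_get']('test')
# e.g. {'character_set': 'utf8mb4', 'collate': 'utf8mb4_general_ci'}, or {} if the schema is absent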