def updateUserTone(conversationPayload, toneAnalyzerPayload, maintainHistory):
"""
updateUserTone processes the Tone Analyzer payload to pull out the emotion,
writing and social tones, and identify the meaningful tones (i.e.,
those tones that meet the specified thresholds).
The conversationPayload json object is updated to include these tones.
@param conversationPayload json object returned by the Watson Conversation
Service
@param toneAnalyzerPayload json object returned by the Watson Tone Analyzer
Service
@returns conversationPayload where the user object has been updated with tone
information from the toneAnalyzerPayload
"""
emotionTone = None
writingTone = None
socialTone = None
    # If there is no context in the payload, initialize one
if 'context' not in conversationPayload:
conversationPayload['context'] = {}
if 'user' not in conversationPayload['context']:
conversationPayload['context'] = initUser()
    # For convenience's sake, define a variable for the user object
user = conversationPayload['context']['user']
# Extract the tones - emotion, writing and social
if toneAnalyzerPayload and toneAnalyzerPayload['document_tone']:
for toneCategory in toneAnalyzerPayload['document_tone']['tone_categories']:
if toneCategory['category_id'] == EMOTION_TONE_LABEL:
emotionTone = toneCategory
if toneCategory['category_id'] == LANGUAGE_TONE_LABEL:
writingTone = toneCategory
if toneCategory['category_id'] == SOCIAL_TONE_LABEL:
socialTone = toneCategory
updateEmotionTone(user, emotionTone, maintainHistory)
updateWritingTone(user, writingTone, maintainHistory)
updateSocialTone(user, socialTone, maintainHistory)
conversationPayload['context']['user'] = user
    return conversationPayload

def contained_bins(start, stop=None):
"""
Given an interval `start:stop`, return bins for intervals completely
*contained by* `start:stop`. The order is according to the bin level
(starting with the smallest bins), and within a level according to the bin
number (ascending).
:arg int start, stop: Interval positions (zero-based, open-ended). If
`stop` is not provided, the interval is assumed to be of length 1
(equivalent to `stop = start + 1`).
:return: All bins for intervals contained by `start:stop`, ordered first
according to bin level (ascending) and then according to bin number
(ascending).
:rtype: list(int)
:raise OutOfRangeError: If `start:stop` exceeds the range of the binning
scheme.
"""
if stop is None:
stop = start + 1
min_bin = assign_bin(start, stop)
    return [bin for bin in overlapping_bins(start, stop) if bin >= min_bin]

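A minimal usage sketch (the interval values are made up; the exact bin numbers returned depend on the binning scheme implemented by assign_bin and overlapping_bins):

inner = contained_bins(0, 131072)   # bins for intervals lying fully inside 0:131072
point = contained_bins(1000)        # stop defaults to start + 1 (a single position)
# By construction the result is a subset of the overlapping bins:
assert all(b in overlapping_bins(0, 131072) for b in inner)
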
def _merge_headers(self, call_specific_headers):
"""
Merge headers from different sources together. Headers passed to the
post/get methods have highest priority, then headers associated with
the connection object itself have next priority.
:param call_specific_headers: A header dict from the get/post call, or
None (the default for those methods).
:return: A key-case-insensitive MutableMapping object which contains
the merged headers. (This doesn't actually return a dict.)
"""
# A case-insensitive mapping is necessary here so that there is
# predictable behavior. If a plain dict were used, you'd get keys in
# the merged dict which differ only in case. The requests library
# would merge them internally, and it would be unpredictable which key
# is chosen for the final set of headers. Another possible approach
# would be to upper/lower-case everything, but this seemed easier. On
# the other hand, I don't know if CaseInsensitiveDict is public API...?
# First establish defaults
merged_headers = requests.structures.CaseInsensitiveDict({
"User-Agent": self.user_agent
})
# Then overlay with specifics from post/get methods
if call_specific_headers:
merged_headers.update(call_specific_headers)
# Special "User-Agent" header check, to ensure one is always sent.
# The call-specific overlay could have null'd out that header.
if not merged_headers.get("User-Agent"):
merged_headers["User-Agent"] = self.user_agent
        return merged_headers

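A usage sketch, assuming a connection object conn whose user_agent attribute is "example-client/1.0" (both names are illustrative):

merged = conn._merge_headers({"user-agent": None, "Accept": "application/json"})
assert merged["User-Agent"] == "example-client/1.0"  # the null'd header is restored
assert merged["accept"] == "application/json"        # lookups are case-insensitive
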
def get_source(self, doc):
"""
Grab contents of 'doc' and return it
:param doc: The active document
:return:
"""
start_iter = doc.get_start_iter()
end_iter = doc.get_end_iter()
source = doc.get_text(start_iter, end_iter, False)
        return source

def delete_checkpoint(self, checkpoint_dir):
"""Removes subdirectory within checkpoint_folder
Parameters
----------
checkpoint_dir : path to checkpoint
"""
if os.path.isfile(checkpoint_dir):
shutil.rmtree(os.path.dirname(checkpoint_dir))
else:
            shutil.rmtree(checkpoint_dir)

def check_type(self, value):
"""Hook for type-checking, invoked during assignment.
raises TypeError if neither value nor self.dtype are None and they
do not match.
will not raise an exception if either value or self.dtype is None
"""
if self.__dict__['dtype'] is None:
return
elif value is None:
return
elif isinstance(value, self.__dict__['dtype']):
return
msg = "Value of type %s, when %s was expected." % (
type(value), self.__dict__['dtype'])
        raise TypeError(msg)

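A sketch of the behaviour, using a hypothetical host object holder that stores dtype in its instance __dict__ as the method expects:

holder.__dict__['dtype'] = int
holder.check_type(3)       # passes: value matches dtype
holder.check_type(None)    # passes: None is always accepted
holder.check_type("3")     # raises TypeError: str is not an int
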
def confirmation_pdf(self, confirmation_id):
"""
Opens a pdf of a confirmation
:param confirmation_id: the confirmation id
:return: dict
"""
        return self._create_get_request(resource=CONFIRMATIONS, billomat_id=confirmation_id, command=PDF)

def trans_his(self, symbol='', start=0, offset=10, date=''):
'''
        Query historical tick-by-tick transaction records.
        :param market: market code (derived from the symbol)
        :param symbol: stock code
        :param start: start position
        :param offset: number of records to fetch
        :param date: date
        :return: pd.DataFrame or None
'''
market = get_stock_market(symbol)
with self.client.connect(*self.bestip):
data = self.client.get_history_transaction_data(
int(market), symbol, int(start), int(offset), date)
            return self.client.to_df(data)

def failures():
"""Show any unexpected failures"""
if not HAVE_BIN_LIBS:
click.echo("missing required binary libs (lz4, msgpack)")
return
q = Queue('failed', connection=worker.connection)
for i in q.get_job_ids():
j = q.job_class.fetch(i, connection=q.connection)
click.echo("%s on %s" % (j.func_name, j.origin))
if not j.func_name.endswith('process_keyset'):
click.echo("params %s %s" % (j._args, j._kwargs))
        click.echo(j.exc_info)

def available_languages(wordlist='best'):
"""
Given a wordlist name, return a dictionary of language codes to filenames,
representing all the languages in which that wordlist is available.
"""
if wordlist == 'best':
available = available_languages('small')
available.update(available_languages('large'))
return available
elif wordlist == 'combined':
logger.warning(
"The 'combined' wordlists have been renamed to 'small'."
)
wordlist = 'small'
available = {}
for path in DATA_PATH.glob('*.msgpack.gz'):
if not path.name.startswith('_'):
list_name = path.name.split('.')[0]
name, lang = list_name.split('_')
if name == wordlist:
available[lang] = str(path)
    return available

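A usage sketch; the returned paths are illustrative and assume DATA_PATH holds files named like large_en.msgpack.gz:

langs = available_languages('large')
# e.g. {'en': '/.../large_en.msgpack.gz', 'de': '/.../large_de.msgpack.gz', ...}
best = available_languages()   # 'best' merges the 'small' and 'large' wordlists
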
def register(self, name, callback, filter):
"""
register: string, function: string, data -> None,
function: data -> boolean -> None
Register will save the given name, callback, and filter function
for use when a packet arrives. When one arrives, the filter
function will be called to determine whether to call its associated
callback function. If the filter method returns true, the callback
method will be called with its associated name string and the packet
which triggered the call.
"""
if name in self.names:
raise ValueError("A callback has already been registered with \
the name '%s'" % name)
self.handlers.append({
'name': name,
'callback': callback,
'filter': filter
})
        self.names.add(name)

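A usage sketch with a hypothetical dispatcher instance and packet layout; the callback receives the registered name and the packet, while the filter sees only the packet:

dispatcher.register(
    'ping-handler',
    lambda name, packet: print(name, packet),
    lambda packet: packet.get('type') == 'ping',
)
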
def _get_scenarios(network_id, include_data, user_id, scenario_ids=None):
"""
Get all the scenarios in a network
"""
scen_qry = db.DBSession.query(Scenario).filter(
Scenario.network_id == network_id).options(
noload('network')).filter(
Scenario.status == 'A')
if scenario_ids:
logging.info("Filtering by scenario_ids %s",scenario_ids)
scen_qry = scen_qry.filter(Scenario.id.in_(scenario_ids))
extras = {'resourcescenarios': [], 'resourcegroupitems': []}
scens = [JSONObject(s,extras=extras) for s in db.DBSession.execute(scen_qry.statement).fetchall()]
all_resource_group_items = _get_all_group_items(network_id)
if include_data == 'Y' or include_data == True:
all_rs = _get_all_resourcescenarios(network_id, user_id)
metadata = _get_metadata(network_id, user_id)
for s in scens:
s.resourcegroupitems = all_resource_group_items.get(s.id, [])
if include_data == 'Y' or include_data == True:
s.resourcescenarios = all_rs.get(s.id, [])
for rs in s.resourcescenarios:
rs.dataset.metadata = metadata.get(rs.dataset_id, {})
    return scens

def small_parts(script, ratio=0.2, non_closed_only=False):
""" Select & delete the small disconnected parts (components) of a mesh.
Args:
script: the FilterScript object or script filename to write
the filter to.
ratio (float): This ratio (between 0 and 1) defines the meaning of
'small' as the threshold ratio between the number of faces of the
largest component and the other ones. A larger value will select
more components.
non_closed_only (bool): Select only non-closed components.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
select.small_parts(script, ratio, non_closed_only)
selected(script)
    return None

def dx_orbit_sys(t, X):
'''X = [
m1x, m1y,
m2x, m2y,
m3x, m3y,
m4x, m4y,
m1vx, m1vy,
m2vx, m2vy,
m3vx, m3vy,
m4vx, m4vy
]
'''
(m1x, m1y,
m2x, m2y,
m3x, m3y,
m4x, m4y,
m1vx, m1vy,
m2vx, m2vy,
m3vx, m3vy,
m4vx, m4vy) = X
m_moon1 = 7.342*(10**22) # kg
m_moon2 = 7.342*(10**22) # kg
m_moon3 = 7.342*(10**22) # kg
m_moon4 = 7.342*(10**22) # kg
G = 6.67408*(10**-11) # m**3 kg**−1 s**−2
dm12 = sqrt((m1x - m2x)**2 + (m1y - m2y)**2)
dm13 = sqrt((m1x - m3x)**2 + (m1y - m3y)**2)
dm14 = sqrt((m1x - m4x)**2 + (m1y - m4y)**2)
dm23 = sqrt((m2x - m3x)**2 + (m2y - m3y)**2)
dm24 = sqrt((m2x - m4x)**2 + (m2y - m4y)**2)
dm34 = sqrt((m3x - m4x)**2 + (m3y - m4y)**2)
f12 = G * m_moon1 * m_moon2 / (dm12 * dm12)
f13 = G * m_moon1 * m_moon3 / (dm13 * dm13)
f14 = G * m_moon1 * m_moon4 / (dm14 * dm14)
f23 = G * m_moon2 * m_moon3 / (dm23 * dm23)
f24 = G * m_moon2 * m_moon4 / (dm24 * dm24)
f34 = G * m_moon3 * m_moon4 / (dm34 * dm34)
dr12 = atan2(m2y - m1y, m2x - m1x)
dr13 = atan2(m3y - m1y, m3x - m1x)
dr14 = atan2(m4y - m1y, m4x - m1x)
dr23 = atan2(m3y - m2y, m3x - m2x)
dr24 = atan2(m4y - m2y, m4x - m2x)
dr34 = atan2(m4y - m3y, m4x - m3x)
f1x = f12 * cos(dr12) + f13 * cos(dr13) + f14 * cos(dr14)
f1y = f12 * sin(dr12) + f13 * sin(dr13) + f14 * sin(dr14)
f2x = f12 * cos(dr12 + pi) + f23 * cos(dr23) + f24 * cos(dr24)
f2y = f12 * sin(dr12 + pi) + f23 * sin(dr23) + f24 * sin(dr24)
f3x = f13 * cos(dr13 + pi) + f23 * cos(dr23 + pi) + f34 * cos(dr34)
f3y = f13 * sin(dr13 + pi) + f23 * sin(dr23 + pi) + f34 * sin(dr34)
f4x = f14 * cos(dr14 + pi) + f24 * cos(dr24 + pi) + f34 * cos(dr34 + pi)
f4y = f14 * sin(dr14 + pi) + f24 * sin(dr24 + pi) + f34 * sin(dr34 + pi)
dX = [
m1vx,
m1vy,
m2vx,
m2vy,
m3vx,
m3vy,
m4vx,
m4vy,
f1x / m_moon1,
f1y / m_moon1,
f2x / m_moon2,
f2y / m_moon2,
f3x / m_moon3,
f3y / m_moon3,
f4x / m_moon4,
f4y / m_moon4,
]
    return dX

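A sketch of integrating the system with SciPy (assumed available); the initial state is made up and only illustrates the expected 16-element layout of four moon positions followed by four velocities:

from scipy.integrate import solve_ivp
X0 = [0, 0,  4e8, 0,  0, 4e8,  -4e8, 0,      # moon positions (m)
      0, 0,  0, 1e3,  -1e3, 0,  0, -1e3]     # moon velocities (m/s)
sol = solve_ivp(dx_orbit_sys, (0.0, 86400.0), X0, max_step=60.0)
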
def set_label(self, label, lang):
""" Add the label of the collection in given lang
:param label: Label Value
:param lang: Language code
"""
try:
self.metadata.add(SKOS.prefLabel, Literal(label, lang=lang))
self.graph.addN([
(self.asNode(), RDFS.label, Literal(label, lang=lang), self.graph),
])
except Exception as E:
            pass

def ckan_extension_template(name, target):
"""
Create ckanext-(name) in target directory.
"""
setupdir = '{0}/ckanext-{1}theme'.format(target, name)
extdir = setupdir + '/ckanext/{0}theme'.format(name)
templatedir = extdir + '/templates/'
staticdir = extdir + '/static/datacats'
makedirs(templatedir + '/home/snippets')
makedirs(staticdir)
here = dirname(__file__)
copyfile(here + '/images/chart.png', staticdir + '/chart.png')
copyfile(here + '/images/datacats-footer.png',
staticdir + '/datacats-footer.png')
filecontents = [
(setupdir + '/setup.py', SETUP_PY),
(setupdir + '/.gitignore', DOT_GITIGNORE),
(setupdir + '/ckanext/__init__.py', NAMESPACE_PACKAGE),
(extdir + '/__init__.py', ''),
(extdir + '/plugins.py', PLUGINS_PY),
(templatedir + '/home/snippets/promoted.html', PROMOTED_SNIPPET),
(templatedir + '/footer.html', FOOTER_HTML),
]
for filename, content in filecontents:
with open(filename, 'w') as f:
            f.write(content.replace('##name##', name))

def shutdown(self):
"""
Request broker gracefully disconnect streams and stop. Safe to call
from any thread.
"""
_v and LOG.debug('%r.shutdown()', self)
def _shutdown():
self._alive = False
if self._alive and not self._exitted:
            self.defer(_shutdown)

def remove_client(self, client):
# type: (object) -> None
"""Remove the client from the users of the socket.
If there are no more clients for the socket, it
will close automatically.
"""
try:
self._clients.remove(id(client))
except ValueError:
pass
if len(self._clients) < 1:
            self.close()

def __require_kytos_config(self):
"""Set path locations from kytosd API.
It should not be called directly, but from properties that require a
running kytosd instance.
"""
if self.__enabled is None:
uri = self._kytos_api + 'api/kytos/core/config/'
try:
options = json.loads(urllib.request.urlopen(uri).read())
except urllib.error.URLError:
print('Kytos is not running.')
sys.exit()
self.__enabled = Path(options.get('napps'))
            self.__installed = Path(options.get('installed_napps'))

def set_selected_submission(self, course, task, submissionid):
""" Set submission whose id is `submissionid` to selected grading submission for the given course/task.
Returns a boolean indicating whether the operation was successful or not.
"""
submission = self.submission_manager.get_submission(submissionid)
# Do not continue if submission does not exist or is not owned by current user
if not submission:
return False
# Check if the submission if from this task/course!
if submission["taskid"] != task.get_id() or submission["courseid"] != course.get_id():
return False
is_staff = self.user_manager.has_staff_rights_on_course(course, self.user_manager.session_username())
# Do not enable submission selection after deadline
if not task.get_accessible_time().is_open() and not is_staff:
return False
# Only allow to set submission if the student must choose their best submission themselves
if task.get_evaluate() != 'student' and not is_staff:
return False
# Check if task is done per group/team
if task.is_group_task() and not is_staff:
group = self.database.aggregations.find_one(
{"courseid": task.get_course_id(), "groups.students": self.user_manager.session_username()},
{"groups": {"$elemMatch": {"students": self.user_manager.session_username()}}})
students = group["groups"][0]["students"]
else:
students = [self.user_manager.session_username()]
# Check if group/team is the same
if students == submission["username"]:
self.database.user_tasks.update_many(
{"courseid": task.get_course_id(), "taskid": task.get_id(), "username": {"$in": students}},
{"$set": {"submissionid": submission['_id'],
"grade": submission['grade'],
"succeeded": submission["result"] == "success"}})
return True
else:
            return False

def get_dsn(d):
"""
Get the dataset name from a record
:param dict d: Metadata
:return str: Dataset name
"""
try:
return d["dataSetName"]
except Exception as e:
logger_misc.warn("get_dsn: Exception: No datasetname found, unable to continue: {}".format(e))
        exit(1)

def signals_blocker(instance, attribute, *args, **kwargs):
"""
Blocks given instance signals before calling the given attribute with \
given arguments and then unblocks the signals.
:param instance: Instance object.
:type instance: QObject
:param attribute: Attribute to call.
:type attribute: QObject
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keywords arguments.
:type \*\*kwargs: \*\*
:return: Object.
:rtype: object
"""
value = None
try:
hasattr(instance, "blockSignals") and instance.blockSignals(True)
value = attribute(*args, **kwargs)
finally:
hasattr(instance, "blockSignals") and instance.blockSignals(False)
    return value

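A usage sketch assuming a Qt widget such as a QLineEdit; signal emission is suppressed for the duration of the wrapped call:

# line_edit is a hypothetical QLineEdit instance.
signals_blocker(line_edit, line_edit.setText, "programmatic value")
# textChanged handlers do not fire for this assignment.
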
def get_labels(obj):
"""
    Retrieve the labels of a clustering object
    :param obj: the clustering object
    :return: the resulting labels
"""
if Clustering.is_pyclustering_instance(obj.model):
return obj._labels_from_pyclusters
else:
        return obj.model.labels_

def list_orgs(self):
""" list the orgs configured in the keychain """
orgs = list(self.orgs.keys())
orgs.sort()
        return orgs

def merge(obj_a, obj_b, strategy='smart', renderer='yaml', merge_lists=False):
'''
Merge a data structure into another by choosing a merge strategy
Strategies:
* aggregate
* list
* overwrite
* recurse
* smart
CLI Example:
.. code-block:: shell
salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}'
'''
return salt.utils.dictupdate.merge(obj_a, obj_b, strategy, renderer,
                                       merge_lists)

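The same merge invoked from Python rather than the CLI, assuming a Salt minion context where execution modules are reachable through __salt__ (values are illustrative):

result = __salt__['slsutil.merge']({'foo': 'Foo'}, {'bar': 'Bar'}, strategy='recurse')
# result == {'foo': 'Foo', 'bar': 'Bar'}
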
def load_page_buffer(self, buffer_number, address, bytes):
"""!
@brief Load data to a numbered page buffer.
This method is used in conjunction with start_program_page_with_buffer() to implement
double buffered programming.
"""
assert buffer_number < len(self.page_buffers), "Invalid buffer number"
# prevent security settings from locking the device
bytes = self.override_security_bits(address, bytes)
# transfer the buffer to device RAM
        self.target.write_memory_block8(self.page_buffers[buffer_number], bytes)

def _get_classifier(self, prefix):
""" Construct a decoder for the next sentence prediction task """
with self.name_scope():
classifier = nn.Dense(2, prefix=prefix)
        return classifier

def rename_state_fluent(name: str) -> str:
'''Returns current state fluent canonical name.
Args:
name (str): The next state fluent name.
Returns:
str: The current state fluent name.
'''
i = name.index('/')
functor = name[:i]
arity = name[i+1:]
return "{}'/{}".format(functor, arity) | Returns current state fluent canonical name.
Args:
name (str): The next state fluent name.
Returns:
str: The current state fluent name. | Below is the the instruction that describes the task:
### Input:
Returns current state fluent canonical name.
Args:
name (str): The next state fluent name.
Returns:
str: The current state fluent name.
### Response:
def rename_state_fluent(name: str) -> str:
'''Returns current state fluent canonical name.
Args:
name (str): The next state fluent name.
Returns:
str: The current state fluent name.
'''
i = name.index('/')
functor = name[:i]
arity = name[i+1:]
return "{}'/{}".format(functor, arity) |
def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None):
"""
Deletes a set of keys using S3's Multi-object delete API. If a
VersionID is specified for that key then that version is removed.
Returns a MultiDeleteResult Object, which contains Deleted
and Error elements for each key you ask to delete.
:type keys: list
:param keys: A list of either key_names or (key_name, versionid) pairs
or a list of Key instances.
:type quiet: boolean
:param quiet: In quiet mode the response includes only keys where
the delete operation encountered an error. For a
successful deletion, the operation does not return
any information about the delete in the response body.
:type mfa_token: tuple or list of strings
:param mfa_token: A tuple or list consisting of the serial number
from the MFA device and the current value of
the six-digit token associated with the device.
This value is required anytime you are
deleting versioned objects from a bucket
that has the MFADelete option on the bucket.
:returns: An instance of MultiDeleteResult
"""
ikeys = iter(keys)
result = MultiDeleteResult(self)
provider = self.connection.provider
query_args = 'delete'
def delete_keys2(hdrs):
hdrs = hdrs or {}
data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
data += u"<Delete>"
if quiet:
data += u"<Quiet>true</Quiet>"
count = 0
while count < 1000:
try:
key = ikeys.next()
except StopIteration:
break
if isinstance(key, basestring):
key_name = key
version_id = None
elif isinstance(key, tuple) and len(key) == 2:
key_name, version_id = key
elif (isinstance(key, Key) or isinstance(key, DeleteMarker)) and key.name:
key_name = key.name
version_id = key.version_id
else:
if isinstance(key, Prefix):
key_name = key.name
code = 'PrefixSkipped' # Don't delete Prefix
else:
key_name = repr(key) # try get a string
code = 'InvalidArgument' # other unknown type
message = 'Invalid. No delete action taken for this object.'
error = Error(key_name, code=code, message=message)
result.errors.append(error)
continue
count += 1
#key_name = key_name.decode('utf-8')
data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
if version_id:
data += u"<VersionId>%s</VersionId>" % version_id
data += u"</Object>"
data += u"</Delete>"
if count <= 0:
return False # no more
data = data.encode('utf-8')
fp = StringIO.StringIO(data)
md5 = boto.utils.compute_md5(fp)
hdrs['Content-MD5'] = md5[1]
hdrs['Content-Type'] = 'text/xml'
if mfa_token:
hdrs[provider.mfa_header] = ' '.join(mfa_token)
response = self.connection.make_request('POST', self.name,
headers=hdrs,
query_args=query_args,
data=data)
body = response.read()
if response.status == 200:
h = handler.XmlHandler(result, self)
xml.sax.parseString(body, h)
return count >= 1000 # more?
else:
raise provider.storage_response_error(response.status,
response.reason,
body)
while delete_keys2(headers):
pass
        return result

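A usage sketch for this boto-style API with made-up key names; plain names and (name, version_id) pairs can be mixed:

result = bucket.delete_keys(['logs/a.txt', ('logs/b.txt', 'some-version-id')],
                            quiet=True)
for error in result.errors:
    print(error.code, error.message)   # attribute names assumed from the Error(...) construction above
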
def login(self, pin, user_type=CKU_USER):
"""
C_Login
:param pin: the user's PIN or None for CKF_PROTECTED_AUTHENTICATION_PATH
:type pin: string
:param user_type: the user type. The default value is
CKU_USER. You may also use CKU_SO
:type user_type: integer
"""
pin1 = ckbytelist(pin)
rv = self.lib.C_Login(self.session, user_type, pin1)
if rv != CKR_OK:
raise PyKCS11Error(rv) | C_Login
:param pin: the user's PIN or None for CKF_PROTECTED_AUTHENTICATION_PATH
:type pin: string
:param user_type: the user type. The default value is
CKU_USER. You may also use CKU_SO
:type user_type: integer | Below is the the instruction that describes the task:
### Input:
C_Login
:param pin: the user's PIN or None for CKF_PROTECTED_AUTHENTICATION_PATH
:type pin: string
:param user_type: the user type. The default value is
CKU_USER. You may also use CKU_SO
:type user_type: integer
### Response:
def login(self, pin, user_type=CKU_USER):
"""
C_Login
:param pin: the user's PIN or None for CKF_PROTECTED_AUTHENTICATION_PATH
:type pin: string
:param user_type: the user type. The default value is
CKU_USER. You may also use CKU_SO
:type user_type: integer
"""
pin1 = ckbytelist(pin)
rv = self.lib.C_Login(self.session, user_type, pin1)
if rv != CKR_OK:
raise PyKCS11Error(rv) |
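A minimal usage sketch for the login call above, assuming PyKCS11 is installed and a token is present in the first slot; the module path and PIN below are placeholders, not values from the original code.

from PyKCS11 import PyKCS11Lib, CKF_SERIAL_SESSION, CKF_RW_SESSION

pkcs11 = PyKCS11Lib()
pkcs11.load("/usr/lib/softhsm/libsofthsm2.so")  # placeholder PKCS#11 module path
slot = pkcs11.getSlotList()[0]                  # first available slot
session = pkcs11.openSession(slot, CKF_SERIAL_SESSION | CKF_RW_SESSION)
session.login("1234")                           # user_type defaults to CKU_USER
# ... token operations ...
session.logout()
session.closeSession()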
def predict_proba(self, X):
"""
Apply transforms, and predict_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_proba : array-like, shape = [n_samples, n_classes]
Predicted probability of each class
"""
Xt, _, _ = self._transform(X)
return self._final_estimator.predict_proba(Xt) | Apply transforms, and predict_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_proba : array-like, shape = [n_samples, n_classes]
Predicted probability of each class | Below is the the instruction that describes the task:
### Input:
Apply transforms, and predict_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_proba : array-like, shape = [n_samples, n_classes]
Predicted probability of each class
### Response:
def predict_proba(self, X):
"""
Apply transforms, and predict_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_proba : array-like, shape = [n_samples, n_classes]
Predicted probability of each class
"""
Xt, _, _ = self._transform(X)
return self._final_estimator.predict_proba(Xt) |
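The call pattern mirrors scikit-learn's Pipeline; a hypothetical sketch using standard scikit-learn estimators (any final step exposing predict_proba would do):

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X, y = load_iris(return_X_y=True)
pipe = Pipeline([("scale", StandardScaler()),
                 ("clf", LogisticRegression(max_iter=200))])
pipe.fit(X, y)
proba = pipe.predict_proba(X)  # array of shape (n_samples, n_classes)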
def readAlignments(self, reads):
"""
Read lines of JSON from self._filename, convert them to read alignments
and yield them.
@param reads: An iterable of L{Read} instances, corresponding to the
reads that were given to BLAST.
@raise ValueError: If any of the lines in the file cannot be converted
to JSON.
@return: A generator that yields C{dark.alignments.ReadAlignments}
instances.
"""
if self._fp is None:
self._open(self._filename)
reads = iter(reads)
try:
for lineNumber, line in enumerate(self._fp, start=2):
try:
record = loads(line[:-1])
except ValueError as e:
raise ValueError(
'Could not convert line %d of %r to JSON (%s). '
'Line is %r.' %
(lineNumber, self._filename, e, line[:-1]))
else:
try:
read = next(reads)
except StopIteration:
raise ValueError(
'Read generator failed to yield read number %d '
'during parsing of BLAST file %r.' %
(lineNumber - 1, self._filename))
else:
alignments = self._dictToAlignments(record, read)
yield ReadAlignments(read, alignments)
finally:
self._fp.close()
self._fp = None | Read lines of JSON from self._filename, convert them to read alignments
and yield them.
@param reads: An iterable of L{Read} instances, corresponding to the
reads that were given to BLAST.
@raise ValueError: If any of the lines in the file cannot be converted
to JSON.
@return: A generator that yields C{dark.alignments.ReadAlignments}
instances. | Below is the the instruction that describes the task:
### Input:
Read lines of JSON from self._filename, convert them to read alignments
and yield them.
@param reads: An iterable of L{Read} instances, corresponding to the
reads that were given to BLAST.
@raise ValueError: If any of the lines in the file cannot be converted
to JSON.
@return: A generator that yields C{dark.alignments.ReadAlignments}
instances.
### Response:
def readAlignments(self, reads):
"""
Read lines of JSON from self._filename, convert them to read alignments
and yield them.
@param reads: An iterable of L{Read} instances, corresponding to the
reads that were given to BLAST.
@raise ValueError: If any of the lines in the file cannot be converted
to JSON.
@return: A generator that yields C{dark.alignments.ReadAlignments}
instances.
"""
if self._fp is None:
self._open(self._filename)
reads = iter(reads)
try:
for lineNumber, line in enumerate(self._fp, start=2):
try:
record = loads(line[:-1])
except ValueError as e:
raise ValueError(
'Could not convert line %d of %r to JSON (%s). '
'Line is %r.' %
(lineNumber, self._filename, e, line[:-1]))
else:
try:
read = next(reads)
except StopIteration:
raise ValueError(
'Read generator failed to yield read number %d '
'during parsing of BLAST file %r.' %
(lineNumber - 1, self._filename))
else:
alignments = self._dictToAlignments(record, read)
yield ReadAlignments(read, alignments)
finally:
self._fp.close()
self._fp = None |
def dump_xml(props, fp, comment=None, encoding='UTF-8', sort_keys=False):
"""
Write a series ``props`` of key-value pairs to a binary filehandle ``fp``
in the format of an XML properties file. The file will include both an XML
declaration and a doctype declaration.
:param props: A mapping or iterable of ``(key, value)`` pairs to write to
``fp``. All keys and values in ``props`` must be text strings. If
``sort_keys`` is `False`, the entries are output in iteration order.
:param fp: a file-like object to write the values of ``props`` to
:type fp: binary file-like object
:param comment: if non-`None`, ``comment`` will be output as a
``<comment>`` element before the ``<entry>`` elements
:type comment: text string or `None`
:param string encoding: the name of the encoding to use for the XML
document (also included in the XML declaration)
:param bool sort_keys: if true, the elements of ``props`` are sorted
lexicographically by key in the output
:return: `None`
"""
fp = codecs.lookup(encoding).streamwriter(fp, errors='xmlcharrefreplace')
print('<?xml version="1.0" encoding={0} standalone="no"?>'
.format(quoteattr(encoding)), file=fp)
for s in _stream_xml(props, comment, sort_keys):
print(s, file=fp) | Write a series ``props`` of key-value pairs to a binary filehandle ``fp``
in the format of an XML properties file. The file will include both an XML
declaration and a doctype declaration.
:param props: A mapping or iterable of ``(key, value)`` pairs to write to
``fp``. All keys and values in ``props`` must be text strings. If
``sort_keys`` is `False`, the entries are output in iteration order.
:param fp: a file-like object to write the values of ``props`` to
:type fp: binary file-like object
:param comment: if non-`None`, ``comment`` will be output as a
``<comment>`` element before the ``<entry>`` elements
:type comment: text string or `None`
:param string encoding: the name of the encoding to use for the XML
document (also included in the XML declaration)
:param bool sort_keys: if true, the elements of ``props`` are sorted
lexicographically by key in the output
:return: `None` | Below is the the instruction that describes the task:
### Input:
Write a series ``props`` of key-value pairs to a binary filehandle ``fp``
in the format of an XML properties file. The file will include both an XML
declaration and a doctype declaration.
:param props: A mapping or iterable of ``(key, value)`` pairs to write to
``fp``. All keys and values in ``props`` must be text strings. If
``sort_keys`` is `False`, the entries are output in iteration order.
:param fp: a file-like object to write the values of ``props`` to
:type fp: binary file-like object
:param comment: if non-`None`, ``comment`` will be output as a
``<comment>`` element before the ``<entry>`` elements
:type comment: text string or `None`
:param string encoding: the name of the encoding to use for the XML
document (also included in the XML declaration)
:param bool sort_keys: if true, the elements of ``props`` are sorted
lexicographically by key in the output
:return: `None`
### Response:
def dump_xml(props, fp, comment=None, encoding='UTF-8', sort_keys=False):
"""
Write a series ``props`` of key-value pairs to a binary filehandle ``fp``
in the format of an XML properties file. The file will include both an XML
declaration and a doctype declaration.
:param props: A mapping or iterable of ``(key, value)`` pairs to write to
``fp``. All keys and values in ``props`` must be text strings. If
``sort_keys`` is `False`, the entries are output in iteration order.
:param fp: a file-like object to write the values of ``props`` to
:type fp: binary file-like object
:param comment: if non-`None`, ``comment`` will be output as a
``<comment>`` element before the ``<entry>`` elements
:type comment: text string or `None`
:param string encoding: the name of the encoding to use for the XML
document (also included in the XML declaration)
:param bool sort_keys: if true, the elements of ``props`` are sorted
lexicographically by key in the output
:return: `None`
"""
fp = codecs.lookup(encoding).streamwriter(fp, errors='xmlcharrefreplace')
print('<?xml version="1.0" encoding={0} standalone="no"?>'
.format(quoteattr(encoding)), file=fp)
for s in _stream_xml(props, comment, sort_keys):
print(s, file=fp) |
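A round-trip sketch for dump_xml; the javaproperties import path is an assumption about where this function lives, and io.BytesIO stands in for the binary filehandle:

import io
from javaproperties import dump_xml  # assumed package/module name

buf = io.BytesIO()
dump_xml({"qux": "edward", "foo": "bar"}, buf, comment="example", sort_keys=True)
print(buf.getvalue().decode("utf-8"))  # entries appear sorted by key as <entry key="..."> elements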
def add_highlight(self, hl_group, line, col_start=0,
col_end=-1, src_id=-1, async_=None,
**kwargs):
"""Add a highlight to the buffer."""
async_ = check_async(async_, kwargs, src_id != 0)
return self.request('nvim_buf_add_highlight', src_id, hl_group,
line, col_start, col_end, async_=async_) | Add a highlight to the buffer. | Below is the the instruction that describes the task:
### Input:
Add a highlight to the buffer.
### Response:
def add_highlight(self, hl_group, line, col_start=0,
col_end=-1, src_id=-1, async_=None,
**kwargs):
"""Add a highlight to the buffer."""
async_ = check_async(async_, kwargs, src_id != 0)
return self.request('nvim_buf_add_highlight', src_id, hl_group,
line, col_start, col_end, async_=async_) |
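A small sketch of driving the buffer method above from a pynvim client; the socket path, highlight groups and positions are arbitrary placeholders:

import pynvim

nvim = pynvim.attach("socket", path="/tmp/nvim")          # placeholder socket path
buf = nvim.current.buffer
src_id = buf.add_highlight("Search", 0, 0, 5, src_id=0)   # src_id=0 asks Nvim for a new id
buf.add_highlight("ErrorMsg", 1, src_id=src_id)           # col_end=-1 default: whole line 1
buf.clear_highlight(src_id)                               # remove both again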
def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
"""
Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Possible values are:
'auto' : Compute all metrics.
        'rmse' : Root mean squared error.
'max_error' : Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
        - 'auto': By default the model will treat missing values as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
--------
create, predict
Examples
--------
.. sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse')
"""
_raise_error_evaluation_metric_is_valid(
metric, ['auto', 'rmse', 'max_error'])
return super(RandomForestRegression, self).evaluate(dataset,
missing_value_action=missing_value_action,
metric=metric) | Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Possible values are:
'auto' : Compute all metrics.
        'rmse' : Root mean squared error.
'max_error' : Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
        - 'auto': By default the model will treat missing values as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
--------
create, predict
Examples
--------
.. sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse') | Below is the the instruction that describes the task:
### Input:
Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Possible values are:
'auto' : Compute all metrics.
        'rmse' : Root mean squared error.
'max_error' : Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
        - 'auto': By default the model will treat missing values as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
--------
create, predict
Examples
--------
.. sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse')
### Response:
def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
"""
Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Possible values are:
'auto' : Compute all metrics.
        'rmse' : Root mean squared error.
'max_error' : Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
        - 'auto': By default the model will treat missing values as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
--------
create, predict
Examples
--------
.. sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse')
"""
_raise_error_evaluation_metric_is_valid(
metric, ['auto', 'rmse', 'max_error'])
return super(RandomForestRegression, self).evaluate(dataset,
missing_value_action=missing_value_action,
metric=metric) |
def execute(self):
"""
Execute the actions necessary to perform a `molecule converge` and
returns None.
:return: None
"""
self.print_info()
self._config.provisioner.converge()
self._config.state.change_state('converged', True) | Execute the actions necessary to perform a `molecule converge` and
returns None.
:return: None | Below is the the instruction that describes the task:
### Input:
Execute the actions necessary to perform a `molecule converge` and
returns None.
:return: None
### Response:
def execute(self):
"""
Execute the actions necessary to perform a `molecule converge` and
returns None.
:return: None
"""
self.print_info()
self._config.provisioner.converge()
self._config.state.change_state('converged', True) |
def _make_routing_list(api_provider):
"""
Returns a list of routes to configure the Local API Service based on the APIs configured in the template.
Parameters
----------
api_provider : samcli.commands.local.lib.sam_api_provider.SamApiProvider
Returns
-------
list(samcli.local.apigw.service.Route)
List of Routes to pass to the service
"""
routes = []
for api in api_provider.get_all():
route = Route(methods=[api.method], function_name=api.function_name, path=api.path,
binary_types=api.binary_media_types)
routes.append(route)
return routes | Returns a list of routes to configure the Local API Service based on the APIs configured in the template.
Parameters
----------
api_provider : samcli.commands.local.lib.sam_api_provider.SamApiProvider
Returns
-------
list(samcli.local.apigw.service.Route)
List of Routes to pass to the service | Below is the the instruction that describes the task:
### Input:
Returns a list of routes to configure the Local API Service based on the APIs configured in the template.
Parameters
----------
api_provider : samcli.commands.local.lib.sam_api_provider.SamApiProvider
Returns
-------
list(samcli.local.apigw.service.Route)
List of Routes to pass to the service
### Response:
def _make_routing_list(api_provider):
"""
Returns a list of routes to configure the Local API Service based on the APIs configured in the template.
Parameters
----------
api_provider : samcli.commands.local.lib.sam_api_provider.SamApiProvider
Returns
-------
list(samcli.local.apigw.service.Route)
List of Routes to pass to the service
"""
routes = []
for api in api_provider.get_all():
route = Route(methods=[api.method], function_name=api.function_name, path=api.path,
binary_types=api.binary_media_types)
routes.append(route)
return routes |
def mmPrettyPrintDataOverlap(self):
"""
Returns pretty-printed string representation of overlap metric data.
(See `mmGetDataOverlap`.)
@return (string) Pretty-printed data
"""
matrix = self.mmGetDataOverlap()
resetsTrace = self.mmGetTraceResets()
text = ""
for i, row in enumerate(matrix):
if resetsTrace.data[i]:
text += "\n"
for j, item in enumerate(row):
if resetsTrace.data[j]:
text += " "
text += "{:4}".format(item)
text += "\n"
return text | Returns pretty-printed string representation of overlap metric data.
(See `mmGetDataOverlap`.)
@return (string) Pretty-printed data | Below is the the instruction that describes the task:
### Input:
Returns pretty-printed string representation of overlap metric data.
(See `mmGetDataOverlap`.)
@return (string) Pretty-printed data
### Response:
def mmPrettyPrintDataOverlap(self):
"""
Returns pretty-printed string representation of overlap metric data.
(See `mmGetDataOverlap`.)
@return (string) Pretty-printed data
"""
matrix = self.mmGetDataOverlap()
resetsTrace = self.mmGetTraceResets()
text = ""
for i, row in enumerate(matrix):
if resetsTrace.data[i]:
text += "\n"
for j, item in enumerate(row):
if resetsTrace.data[j]:
text += " "
text += "{:4}".format(item)
text += "\n"
return text |
def meraculous_runner(self):
"""
Check to make sure that the allAssembliesDir has been created, if not,
make it. This will only execute for the first time an assembly has been
run in this directory.
Run the directory from allAssembliesDir. The self.callString instance
attribute tells Meraculous to name the assembly directory self.runName.
After the run is complete, create the meraculous report, passing the
directory containing the run (aka self.thisAssemblyDir).
"""
#set the dir to temp assembly dir
os.chdir(self.allAssembliesDir)
print(self.callString)
p = subprocess.run(self.callString, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
output = str(p.stdout)
err = str(p.stderr)
#generate the report for the run
self._generate_report()
#exit, returning the output and err
return (output, err) | Check to make sure that the allAssembliesDir has been created, if not,
make it. This will only execute for the first time an assembly has been
run in this directory.
Run the directory from allAssembliesDir. The self.callString instance
attribute tells Meraculous to name the assembly directory self.runName.
After the run is complete, create the meraculous report, passing the
directory containing the run (aka self.thisAssemblyDir). | Below is the the instruction that describes the task:
### Input:
Check to make sure that the allAssembliesDir has been created, if not,
make it. This will only execute for the first time an assembly has been
run in this directory.
Run the directory from allAssembliesDir. The self.callString instance
attribute tells Meraculous to name the assembly directory self.runName.
After the run is complete, create the meraculous report, passing the
directory containing the run (aka self.thisAssemblyDir).
### Response:
def meraculous_runner(self):
"""
Check to make sure that the allAssembliesDir has been created, if not,
make it. This will only execute for the first time an assembly has been
run in this directory.
Run the directory from allAssembliesDir. The self.callString instance
attribute tells Meraculous to name the assembly directory self.runName.
After the run is complete, create the meraculous report, passing the
directory containing the run (aka self.thisAssemblyDir).
"""
#set the dir to temp assembly dir
os.chdir(self.allAssembliesDir)
print(self.callString)
p = subprocess.run(self.callString, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
output = str(p.stdout)
err = str(p.stderr)
#generate the report for the run
self._generate_report()
#exit, returning the output and err
return (output, err) |
def chooseReliableActiveFiringRate(cellsPerAxis, bumpSigma,
minimumActiveDiameter=None):
"""
When a cell is activated by sensory input, this implies that the phase is
within a particular small patch of the rhombus. This patch is roughly
equivalent to a circle of diameter (1/cellsPerAxis)(2/sqrt(3)), centered on
the cell. This 2/sqrt(3) accounts for the fact that when circles are packed
into hexagons, there are small uncovered spaces between the circles, so the
circles need to expand by a factor of (2/sqrt(3)) to cover this space.
This sensory input will activate the phase at the center of this cell. To
account for uncertainty of the actual phase that was used during learning,
the bump of active cells needs to be sufficiently large for this cell to
remain active until the bump has moved by the above diameter. So the
diameter of the bump (and, equivalently, the cell's firing field) needs to
be at least 2 of the above diameters.
@param minimumActiveDiameter (float or None)
If specified, this makes sure the bump of active cells is always above a
certain size. This is useful for testing scenarios where grid cell modules
can only encode location with a limited "readout resolution", matching the
biology.
@return
An "activeFiringRate" for use in the ThresholdedGaussian2DLocationModule.
"""
firingFieldDiameter = 2 * (1./cellsPerAxis)*(2./math.sqrt(3))
if minimumActiveDiameter:
firingFieldDiameter = max(firingFieldDiameter, minimumActiveDiameter)
return ThresholdedGaussian2DLocationModule.gaussian(
bumpSigma, firingFieldDiameter / 2.) | When a cell is activated by sensory input, this implies that the phase is
within a particular small patch of the rhombus. This patch is roughly
equivalent to a circle of diameter (1/cellsPerAxis)(2/sqrt(3)), centered on
the cell. This 2/sqrt(3) accounts for the fact that when circles are packed
into hexagons, there are small uncovered spaces between the circles, so the
circles need to expand by a factor of (2/sqrt(3)) to cover this space.
This sensory input will activate the phase at the center of this cell. To
account for uncertainty of the actual phase that was used during learning,
the bump of active cells needs to be sufficiently large for this cell to
remain active until the bump has moved by the above diameter. So the
diameter of the bump (and, equivalently, the cell's firing field) needs to
be at least 2 of the above diameters.
@param minimumActiveDiameter (float or None)
If specified, this makes sure the bump of active cells is always above a
certain size. This is useful for testing scenarios where grid cell modules
can only encode location with a limited "readout resolution", matching the
biology.
@return
An "activeFiringRate" for use in the ThresholdedGaussian2DLocationModule. | Below is the the instruction that describes the task:
### Input:
When a cell is activated by sensory input, this implies that the phase is
within a particular small patch of the rhombus. This patch is roughly
equivalent to a circle of diameter (1/cellsPerAxis)(2/sqrt(3)), centered on
the cell. This 2/sqrt(3) accounts for the fact that when circles are packed
into hexagons, there are small uncovered spaces between the circles, so the
circles need to expand by a factor of (2/sqrt(3)) to cover this space.
This sensory input will activate the phase at the center of this cell. To
account for uncertainty of the actual phase that was used during learning,
the bump of active cells needs to be sufficiently large for this cell to
remain active until the bump has moved by the above diameter. So the
diameter of the bump (and, equivalently, the cell's firing field) needs to
be at least 2 of the above diameters.
@param minimumActiveDiameter (float or None)
If specified, this makes sure the bump of active cells is always above a
certain size. This is useful for testing scenarios where grid cell modules
can only encode location with a limited "readout resolution", matching the
biology.
@return
An "activeFiringRate" for use in the ThresholdedGaussian2DLocationModule.
### Response:
def chooseReliableActiveFiringRate(cellsPerAxis, bumpSigma,
minimumActiveDiameter=None):
"""
When a cell is activated by sensory input, this implies that the phase is
within a particular small patch of the rhombus. This patch is roughly
equivalent to a circle of diameter (1/cellsPerAxis)(2/sqrt(3)), centered on
the cell. This 2/sqrt(3) accounts for the fact that when circles are packed
into hexagons, there are small uncovered spaces between the circles, so the
circles need to expand by a factor of (2/sqrt(3)) to cover this space.
This sensory input will activate the phase at the center of this cell. To
account for uncertainty of the actual phase that was used during learning,
the bump of active cells needs to be sufficiently large for this cell to
remain active until the bump has moved by the above diameter. So the
diameter of the bump (and, equivalently, the cell's firing field) needs to
be at least 2 of the above diameters.
@param minimumActiveDiameter (float or None)
If specified, this makes sure the bump of active cells is always above a
certain size. This is useful for testing scenarios where grid cell modules
can only encode location with a limited "readout resolution", matching the
biology.
@return
An "activeFiringRate" for use in the ThresholdedGaussian2DLocationModule.
"""
firingFieldDiameter = 2 * (1./cellsPerAxis)*(2./math.sqrt(3))
if minimumActiveDiameter:
firingFieldDiameter = max(firingFieldDiameter, minimumActiveDiameter)
return ThresholdedGaussian2DLocationModule.gaussian(
bumpSigma, firingFieldDiameter / 2.) |
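To make the sizing argument concrete, a quick check of the firing-field diameter formula with an assumed cellsPerAxis of 10 (pure arithmetic, no other dependencies):

import math

cellsPerAxis = 10
firingFieldDiameter = 2 * (1. / cellsPerAxis) * (2. / math.sqrt(3))
print(round(firingFieldDiameter, 4))  # 0.2309, i.e. the bump spans roughly a quarter of the rhombus edge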
def sensoryCompute(self, activeMinicolumns, learn):
"""
@param activeMinicolumns (numpy array)
List of indices of minicolumns to activate.
@param learn (bool)
If True, the two layers should learn this association.
@return (tuple of dicts)
Data for logging/tracing.
"""
inputParams = {
"activeColumns": activeMinicolumns,
"basalInput": self.getLocationRepresentation(),
"basalGrowthCandidates": self.getLearnableLocationRepresentation(),
"learn": learn
}
self.L4.compute(**inputParams)
locationParams = {
"anchorInput": self.L4.getActiveCells(),
"anchorGrowthCandidates": self.L4.getWinnerCells(),
"learn": learn,
}
for module in self.L6aModules:
module.sensoryCompute(**locationParams)
return (inputParams, locationParams) | @param activeMinicolumns (numpy array)
List of indices of minicolumns to activate.
@param learn (bool)
If True, the two layers should learn this association.
@return (tuple of dicts)
Data for logging/tracing. | Below is the the instruction that describes the task:
### Input:
@param activeMinicolumns (numpy array)
List of indices of minicolumns to activate.
@param learn (bool)
If True, the two layers should learn this association.
@return (tuple of dicts)
Data for logging/tracing.
### Response:
def sensoryCompute(self, activeMinicolumns, learn):
"""
@param activeMinicolumns (numpy array)
List of indices of minicolumns to activate.
@param learn (bool)
If True, the two layers should learn this association.
@return (tuple of dicts)
Data for logging/tracing.
"""
inputParams = {
"activeColumns": activeMinicolumns,
"basalInput": self.getLocationRepresentation(),
"basalGrowthCandidates": self.getLearnableLocationRepresentation(),
"learn": learn
}
self.L4.compute(**inputParams)
locationParams = {
"anchorInput": self.L4.getActiveCells(),
"anchorGrowthCandidates": self.L4.getWinnerCells(),
"learn": learn,
}
for module in self.L6aModules:
module.sensoryCompute(**locationParams)
return (inputParams, locationParams) |
def get_stp_mst_detail_output_cist_port_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
cist = ET.SubElement(output, "cist")
port = ET.SubElement(cist, "port")
interface_name = ET.SubElement(port, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_stp_mst_detail_output_cist_port_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
cist = ET.SubElement(output, "cist")
port = ET.SubElement(cist, "port")
interface_name = ET.SubElement(port, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def check_validity_of_long_form_args(model_obj, wide_weights, rows_to_obs):
"""
Ensures the args to `create_long_form_weights` have expected properties.
"""
# Ensure model_obj has the necessary method for create_long_form_weights
ensure_model_obj_has_mapping_constructor(model_obj)
# Ensure wide_weights is a 1D or 2D ndarray.
ensure_wide_weights_is_1D_or_2D_ndarray(wide_weights)
# Ensure rows_to_obs is a scipy sparse matrix
ensure_rows_to_obs_validity(rows_to_obs)
return None | Ensures the args to `create_long_form_weights` have expected properties. | Below is the the instruction that describes the task:
### Input:
Ensures the args to `create_long_form_weights` have expected properties.
### Response:
def check_validity_of_long_form_args(model_obj, wide_weights, rows_to_obs):
"""
Ensures the args to `create_long_form_weights` have expected properties.
"""
# Ensure model_obj has the necessary method for create_long_form_weights
ensure_model_obj_has_mapping_constructor(model_obj)
# Ensure wide_weights is a 1D or 2D ndarray.
ensure_wide_weights_is_1D_or_2D_ndarray(wide_weights)
# Ensure rows_to_obs is a scipy sparse matrix
ensure_rows_to_obs_validity(rows_to_obs)
return None |
def quote_header_value(value, extra_chars='', allow_token=True):
"""Quote a header value if necessary.
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged.
"""
value = to_string(value)
if allow_token:
token_chars = HEADER_TOKEN_CHARS | set(extra_chars)
if set(value).issubset(token_chars):
return value
return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"') | Quote a header value if necessary.
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged. | Below is the the instruction that describes the task:
### Input:
Quote a header value if necessary.
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged.
### Response:
def quote_header_value(value, extra_chars='', allow_token=True):
"""Quote a header value if necessary.
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged.
"""
value = to_string(value)
if allow_token:
token_chars = HEADER_TOKEN_CHARS | set(extra_chars)
if set(value).issubset(token_chars):
return value
return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"') |
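Illustrative calls, assuming HEADER_TOKEN_CHARS holds roughly the RFC 7230 token characters (letters, digits and a few punctuation marks, but no spaces or quotes):

quote_header_value("gzip")          # all token characters -> 'gzip'
quote_header_value("hello world")   # the space forces quoting -> '"hello world"'
quote_header_value('say "hi"')      # embedded quotes are escaped -> '"say \\"hi\\""'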
def workload_state_compare(current_workload_state, workload_state):
""" Return highest priority of two states"""
hierarchy = {'unknown': -1,
'active': 0,
'maintenance': 1,
'waiting': 2,
'blocked': 3,
}
if hierarchy.get(workload_state) is None:
workload_state = 'unknown'
if hierarchy.get(current_workload_state) is None:
current_workload_state = 'unknown'
# Set workload_state based on hierarchy of statuses
if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
return current_workload_state
else:
return workload_state | Return highest priority of two states | Below is the the instruction that describes the task:
### Input:
Return highest priority of two states
### Response:
def workload_state_compare(current_workload_state, workload_state):
""" Return highest priority of two states"""
hierarchy = {'unknown': -1,
'active': 0,
'maintenance': 1,
'waiting': 2,
'blocked': 3,
}
if hierarchy.get(workload_state) is None:
workload_state = 'unknown'
if hierarchy.get(current_workload_state) is None:
current_workload_state = 'unknown'
# Set workload_state based on hierarchy of statuses
if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
return current_workload_state
else:
return workload_state |
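For example, the higher-severity state always wins, and unrecognised states fall back to 'unknown' (the lowest rank):

workload_state_compare('active', 'blocked')   # -> 'blocked'
workload_state_compare('waiting', 'active')   # -> 'waiting'
workload_state_compare('bogus', 'active')     # -> 'active'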
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(base=None, increment=None, batch_size=None)
parameters['base'] = client_message.read_long()
parameters['increment'] = client_message.read_long()
parameters['batch_size'] = client_message.read_int()
return parameters | Decode response from client message | Below is the the instruction that describes the task:
### Input:
Decode response from client message
### Response:
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(base=None, increment=None, batch_size=None)
parameters['base'] = client_message.read_long()
parameters['increment'] = client_message.read_long()
parameters['batch_size'] = client_message.read_int()
return parameters |
def unlisten_to_node(self, id_):
"""Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
"""
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_ | Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed | Below is the the instruction that describes the task:
### Input:
Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
### Response:
def unlisten_to_node(self, id_):
"""Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
"""
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_ |
def should_display_warnings_for(to_type):
""" Central method where we control whether warnings should be displayed """
if not hasattr(to_type, '__module__'):
return True
elif to_type.__module__ in {'builtins'} or to_type.__module__.startswith('parsyfiles') \
or to_type.__name__ in {'DataFrame'}:
return False
elif issubclass(to_type, int) or issubclass(to_type, str) \
or issubclass(to_type, float) or issubclass(to_type, bool):
return False
else:
return True | Central method where we control whether warnings should be displayed | Below is the the instruction that describes the task:
### Input:
Central method where we control whether warnings should be displayed
### Response:
def should_display_warnings_for(to_type):
""" Central method where we control whether warnings should be displayed """
if not hasattr(to_type, '__module__'):
return True
elif to_type.__module__ in {'builtins'} or to_type.__module__.startswith('parsyfiles') \
or to_type.__name__ in {'DataFrame'}:
return False
elif issubclass(to_type, int) or issubclass(to_type, str) \
or issubclass(to_type, float) or issubclass(to_type, bool):
return False
else:
return True |
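A few representative outcomes of the gate above (the parsyfiles-internal and DataFrame branches are not exercised here); UserConfig and Token are made-up classes for illustration:

class UserConfig:        # ordinary user-defined class
    pass

class Token(str):        # user-defined, but a str subclass
    pass

should_display_warnings_for(UserConfig)  # -> True
should_display_warnings_for(dict)        # -> False: defined in builtins
should_display_warnings_for(Token)       # -> False: subclass of str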
def _margtimephase_loglr(self, mf_snr, opt_snr):
"""Returns the log likelihood ratio marginalized over time and phase.
"""
return special.logsumexp(numpy.log(special.i0(mf_snr)),
b=self._deltat) - 0.5*opt_snr | Returns the log likelihood ratio marginalized over time and phase. | Below is the the instruction that describes the task:
### Input:
Returns the log likelihood ratio marginalized over time and phase.
### Response:
def _margtimephase_loglr(self, mf_snr, opt_snr):
"""Returns the log likelihood ratio marginalized over time and phase.
"""
return special.logsumexp(numpy.log(special.i0(mf_snr)),
b=self._deltat) - 0.5*opt_snr |
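In equation form, using the variable names from the code above (mf_snr is the matched-filter SNR series sampled with spacing deltat, opt_snr the optimal-SNR term), the returned quantity is:

\log \mathcal{L} \;=\; \log \sum_{k} \Delta t \, I_0\!\big(\mathrm{mf\_snr}(t_k)\big) \;-\; \tfrac{1}{2}\,\mathrm{opt\_snr}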
def from_maildir(self, codes: str) -> FrozenSet[Flag]:
"""Return the set of IMAP flags that correspond to the letter codes.
Args:
codes: The letter codes to map.
"""
flags = set()
for code in codes:
if code == ',':
break
to_sys = self._to_sys.get(code)
if to_sys is not None:
flags.add(to_sys)
else:
to_kwd = self._to_kwd.get(code)
if to_kwd is not None:
flags.add(to_kwd)
return frozenset(flags) | Return the set of IMAP flags that correspond to the letter codes.
Args:
codes: The letter codes to map. | Below is the the instruction that describes the task:
### Input:
Return the set of IMAP flags that correspond to the letter codes.
Args:
codes: The letter codes to map.
### Response:
def from_maildir(self, codes: str) -> FrozenSet[Flag]:
"""Return the set of IMAP flags that correspond to the letter codes.
Args:
codes: The letter codes to map.
"""
flags = set()
for code in codes:
if code == ',':
break
to_sys = self._to_sys.get(code)
if to_sys is not None:
flags.add(to_sys)
else:
to_kwd = self._to_kwd.get(code)
if to_kwd is not None:
flags.add(to_kwd)
return frozenset(flags) |
def safe_makedirs(path):
"""A safe function for creating a directory tree."""
try:
os.makedirs(path)
except OSError as err:
if err.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise | A safe function for creating a directory tree. | Below is the the instruction that describes the task:
### Input:
A safe function for creating a directory tree.
### Response:
def safe_makedirs(path):
"""A safe function for creating a directory tree."""
try:
os.makedirs(path)
except OSError as err:
if err.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise |
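The point of the EEXIST handling is that repeated calls are harmless, while a collision with an existing regular file still raises; a quick check using a temporary directory:

import os
import tempfile

base = tempfile.mkdtemp()
target = os.path.join(base, "a", "b", "c")
safe_makedirs(target)          # creates the whole tree
safe_makedirs(target)          # second call is a no-op instead of raising OSError
print(os.path.isdir(target))   # True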
def dolnp3_0(Data):
"""
DEPRECATED!! USE dolnp()
    Description: takes a list of dicts with the controlled vocabulary of 3_0 and calls dolnp on them after reformatting for compatibility.
Parameters
__________
    Data : nested list of dictionaries with keys
dir_dec
dir_inc
dir_tilt_correction
method_codes
Returns
-------
ReturnData : dictionary with keys
dec : fisher mean dec of data in Data
inc : fisher mean inc of data in Data
n_lines : number of directed lines [method_code = DE-BFL or DE-FM]
n_planes : number of best fit planes [method_code = DE-BFP]
alpha95 : fisher confidence circle from Data
R : fisher R value of Data
K : fisher k value of Data
Effects
prints to screen in case of no data
"""
if len(Data) == 0:
print("This function requires input Data have at least 1 entry")
return {}
if len(Data) == 1:
ReturnData = {}
ReturnData["dec"] = Data[0]['dir_dec']
ReturnData["inc"] = Data[0]['dir_inc']
ReturnData["n_total"] = '1'
if "DE-BFP" in Data[0]['method_codes']:
ReturnData["n_lines"] = '0'
ReturnData["n_planes"] = '1'
else:
ReturnData["n_planes"] = '0'
ReturnData["n_lines"] = '1'
ReturnData["alpha95"] = ""
ReturnData["R"] = ""
ReturnData["K"] = ""
return ReturnData
else:
LnpData = []
for n, d in enumerate(Data):
LnpData.append({})
LnpData[n]['dec'] = d['dir_dec']
LnpData[n]['inc'] = d['dir_inc']
LnpData[n]['tilt_correction'] = d['dir_tilt_correction']
if 'method_codes' in list(d.keys()):
if "DE-BFP" in d['method_codes']:
LnpData[n]['dir_type'] = 'p'
else:
LnpData[n]['dir_type'] = 'l'
# get a sample average from all specimens
ReturnData = dolnp(LnpData, 'dir_type')
return ReturnData | DEPRECATED!! USE dolnp()
    Description: takes a list of dicts with the controlled vocabulary of 3_0 and calls dolnp on them after reformatting for compatibility.
Parameters
__________
    Data : nested list of dictionaries with keys
dir_dec
dir_inc
dir_tilt_correction
method_codes
Returns
-------
ReturnData : dictionary with keys
dec : fisher mean dec of data in Data
inc : fisher mean inc of data in Data
n_lines : number of directed lines [method_code = DE-BFL or DE-FM]
n_planes : number of best fit planes [method_code = DE-BFP]
alpha95 : fisher confidence circle from Data
R : fisher R value of Data
K : fisher k value of Data
Effects
prints to screen in case of no data | Below is the the instruction that describes the task:
### Input:
DEPRECATED!! USE dolnp()
    Description: takes a list of dicts with the controlled vocabulary of 3_0 and calls dolnp on them after reformatting for compatibility.
Parameters
__________
    Data : nested list of dictionaries with keys
dir_dec
dir_inc
dir_tilt_correction
method_codes
Returns
-------
ReturnData : dictionary with keys
dec : fisher mean dec of data in Data
inc : fisher mean inc of data in Data
n_lines : number of directed lines [method_code = DE-BFL or DE-FM]
n_planes : number of best fit planes [method_code = DE-BFP]
alpha95 : fisher confidence circle from Data
R : fisher R value of Data
K : fisher k value of Data
Effects
prints to screen in case of no data
### Response:
def dolnp3_0(Data):
"""
DEPRECATED!! USE dolnp()
    Description: takes a list of dicts with the controlled vocabulary of 3_0 and calls dolnp on them after reformatting for compatibility.
Parameters
__________
    Data : nested list of dictionaries with keys
dir_dec
dir_inc
dir_tilt_correction
method_codes
Returns
-------
ReturnData : dictionary with keys
dec : fisher mean dec of data in Data
inc : fisher mean inc of data in Data
n_lines : number of directed lines [method_code = DE-BFL or DE-FM]
n_planes : number of best fit planes [method_code = DE-BFP]
alpha95 : fisher confidence circle from Data
R : fisher R value of Data
K : fisher k value of Data
Effects
prints to screen in case of no data
"""
if len(Data) == 0:
print("This function requires input Data have at least 1 entry")
return {}
if len(Data) == 1:
ReturnData = {}
ReturnData["dec"] = Data[0]['dir_dec']
ReturnData["inc"] = Data[0]['dir_inc']
ReturnData["n_total"] = '1'
if "DE-BFP" in Data[0]['method_codes']:
ReturnData["n_lines"] = '0'
ReturnData["n_planes"] = '1'
else:
ReturnData["n_planes"] = '0'
ReturnData["n_lines"] = '1'
ReturnData["alpha95"] = ""
ReturnData["R"] = ""
ReturnData["K"] = ""
return ReturnData
else:
LnpData = []
for n, d in enumerate(Data):
LnpData.append({})
LnpData[n]['dec'] = d['dir_dec']
LnpData[n]['inc'] = d['dir_inc']
LnpData[n]['tilt_correction'] = d['dir_tilt_correction']
if 'method_codes' in list(d.keys()):
if "DE-BFP" in d['method_codes']:
LnpData[n]['dir_type'] = 'p'
else:
LnpData[n]['dir_type'] = 'l'
# get a sample average from all specimens
ReturnData = dolnp(LnpData, 'dir_type')
return ReturnData |
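An illustration of the expected record shape; with a single specimen the function short-circuits and simply echoes the direction, so this sketch does not need pmagpy's dolnp (the values are invented):

Data = [{
    'dir_dec': 350.0,
    'dir_inc': 55.0,
    'dir_tilt_correction': 0,
    'method_codes': 'DE-BFL',
}]
result = dolnp3_0(Data)
# {'dec': 350.0, 'inc': 55.0, 'n_total': '1', 'n_planes': '0',
#  'n_lines': '1', 'alpha95': '', 'R': '', 'K': ''}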
def unsurt(surt):
"""
# Simple surt
>>> unsurt('com,example)/')
'example.com/'
# Broken surt
>>> unsurt('com,example)')
'com,example)'
# Long surt
>>> unsurt('suffix,domain,sub,subsub,another,subdomain)/path/file/\
index.html?a=b?c=)/')
'subdomain.another.subsub.sub.domain.suffix/path/file/index.html?a=b?c=)/'
"""
try:
index = surt.index(')/')
parts = surt[0:index].split(',')
parts.reverse()
host = '.'.join(parts)
host += surt[index + 1:]
return host
except ValueError:
# May not be a valid surt
return surt | # Simple surt
>>> unsurt('com,example)/')
'example.com/'
# Broken surt
>>> unsurt('com,example)')
'com,example)'
# Long surt
>>> unsurt('suffix,domain,sub,subsub,another,subdomain)/path/file/\
index.html?a=b?c=)/')
'subdomain.another.subsub.sub.domain.suffix/path/file/index.html?a=b?c=)/' | Below is the the instruction that describes the task:
### Input:
# Simple surt
>>> unsurt('com,example)/')
'example.com/'
# Broken surt
>>> unsurt('com,example)')
'com,example)'
# Long surt
>>> unsurt('suffix,domain,sub,subsub,another,subdomain)/path/file/\
index.html?a=b?c=)/')
'subdomain.another.subsub.sub.domain.suffix/path/file/index.html?a=b?c=)/'
### Response:
def unsurt(surt):
"""
# Simple surt
>>> unsurt('com,example)/')
'example.com/'
# Broken surt
>>> unsurt('com,example)')
'com,example)'
# Long surt
>>> unsurt('suffix,domain,sub,subsub,another,subdomain)/path/file/\
index.html?a=b?c=)/')
'subdomain.another.subsub.sub.domain.suffix/path/file/index.html?a=b?c=)/'
"""
try:
index = surt.index(')/')
parts = surt[0:index].split(',')
parts.reverse()
host = '.'.join(parts)
host += surt[index + 1:]
return host
except ValueError:
# May not be a valid surt
return surt |
def save(self, *args):
""" Save cache to file using pickle.
Parameters
----------
*args:
All but the last argument are inputs to the cached function. The
last is the actual value of the function.
"""
with open(self.file_root + '.pkl', "wb") as f:
pickle.dump(args, f, protocol=pickle.HIGHEST_PROTOCOL) | Save cache to file using pickle.
Parameters
----------
*args:
All but the last argument are inputs to the cached function. The
last is the actual value of the function. | Below is the the instruction that describes the task:
### Input:
Save cache to file using pickle.
Parameters
----------
*args:
All but the last argument are inputs to the cached function. The
last is the actual value of the function.
### Response:
def save(self, *args):
""" Save cache to file using pickle.
Parameters
----------
*args:
All but the last argument are inputs to the cached function. The
last is the actual value of the function.
"""
with open(self.file_root + '.pkl', "wb") as f:
pickle.dump(args, f, protocol=pickle.HIGHEST_PROTOCOL) |
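A sketch of reading such a cache file back; the loader below is illustrative and not part of the original class, it simply mirrors the pickle protocol used by save:

import pickle

def load_cached(file_root):
    # Return the (inputs..., value) tuple written by save(), or None if no cache exists.
    try:
        with open(file_root + '.pkl', 'rb') as f:
            return pickle.load(f)
    except FileNotFoundError:
        return None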
def update_handler(feeds):
'''Update all cross-referencing filters results for feeds and others, related to them.
Intended to be called from non-Feed update hooks (like new Post saving).'''
# Check if this call is a result of actions initiated from
# one of the hooks in a higher frame (resulting in recursion).
if Feed._filters_update_handler_lock: return
return Feed._filters_update_handler(Feed, feeds, force=True) | Update all cross-referencing filters results for feeds and others, related to them.
Intended to be called from non-Feed update hooks (like new Post saving). | Below is the the instruction that describes the task:
### Input:
Update all cross-referencing filters results for feeds and others, related to them.
Intended to be called from non-Feed update hooks (like new Post saving).
### Response:
def update_handler(feeds):
'''Update all cross-referencing filters results for feeds and others, related to them.
Intended to be called from non-Feed update hooks (like new Post saving).'''
# Check if this call is a result of actions initiated from
# one of the hooks in a higher frame (resulting in recursion).
if Feed._filters_update_handler_lock: return
return Feed._filters_update_handler(Feed, feeds, force=True) |
def load_edbfile(file=None):
"""Load the targets from a file"""
import ephem,string,math
if file is None:
import tkFileDialog
try:
file=tkFileDialog.askopenfilename()
except:
return
if file is None or file == '':
return
f=open(file)
lines=f.readlines()
f.close()
for line in lines:
p=line.split(',')
name=p[0].strip().upper()
mpc_objs[name]=ephem.readdb(line)
mpc_objs[name].compute()
objInfoDict[name]="%6s %6s %6s\n" % ( string.center("a",6),
string.center("e",6),
string.center("i",6) )
objInfoDict[name]+="%6.2f %6.3f %6.2f\n" % (mpc_objs[name]._a,mpc_objs[name]._e,math.degrees(mpc_objs[name]._inc))
objInfoDict[name]+="%7.2f %7.2f\n" % ( mpc_objs[name].earth_distance, mpc_objs[name].mag)
doplot(mpc_objs) | Load the targets from a file | Below is the the instruction that describes the task:
### Input:
Load the targets from a file
### Response:
def load_edbfile(file=None):
"""Load the targets from a file"""
import ephem,string,math
if file is None:
import tkFileDialog
try:
file=tkFileDialog.askopenfilename()
except:
return
if file is None or file == '':
return
f=open(file)
lines=f.readlines()
f.close()
for line in lines:
p=line.split(',')
name=p[0].strip().upper()
mpc_objs[name]=ephem.readdb(line)
mpc_objs[name].compute()
objInfoDict[name]="%6s %6s %6s\n" % ( string.center("a",6),
string.center("e",6),
string.center("i",6) )
objInfoDict[name]+="%6.2f %6.3f %6.2f\n" % (mpc_objs[name]._a,mpc_objs[name]._e,math.degrees(mpc_objs[name]._inc))
objInfoDict[name]+="%7.2f %7.2f\n" % ( mpc_objs[name].earth_distance, mpc_objs[name].mag)
doplot(mpc_objs) |
def _api_post(self, url, **kwargs):
"""
A convenience wrapper for _post. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
self._post(**kwargs) | A convenience wrapper for _post. Adds headers, auth and base url by
default | Below is the the instruction that describes the task:
### Input:
A convenience wrapper for _post. Adds headers, auth and base url by
default
### Response:
def _api_post(self, url, **kwargs):
"""
A convenience wrapper for _post. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
self._post(**kwargs) |
def update_batch_count(instance, **kwargs):
"""Sample post-save handler to update the sample's batch count.
Batches are unpublished by default (to prevent publishing empty batches).
If the `AUTO_PUBLISH_BATCH` setting is true, the batch will be published
automatically when at least one published sample is in the batch.
"""
batch = instance.batch
count = batch.samples.filter(published=True).count()
if count != batch.count:
batch.count = count
if AUTO_PUBLISH_BATCH:
batch.published = bool(count)
batch.save() | Sample post-save handler to update the sample's batch count.
Batches are unpublished by default (to prevent publishing empty batches).
If the `AUTO_PUBLISH_BATCH` setting is true, the batch will be published
automatically when at least one published sample is in the batch. | Below is the the instruction that describes the task:
### Input:
Sample post-save handler to update the sample's batch count.
Batches are unpublished by default (to prevent publishing empty batches).
If the `AUTO_PUBLISH_BATCH` setting is true, the batch will be published
automatically when at least one published sample is in the batch.
### Response:
def update_batch_count(instance, **kwargs):
"""Sample post-save handler to update the sample's batch count.
Batches are unpublished by default (to prevent publishing empty batches).
If the `AUTO_PUBLISH_BATCH` setting is true, the batch will be published
automatically when at least one published sample is in the batch.
"""
batch = instance.batch
count = batch.samples.filter(published=True).count()
if count != batch.count:
batch.count = count
if AUTO_PUBLISH_BATCH:
batch.published = bool(count)
batch.save() |
def read_settings(self):
"""Set the dock state from QSettings.
Do this on init and after changing options in the options dialog.
"""
extent = setting('user_extent', None, str)
if extent:
extent = QgsGeometry.fromWkt(extent)
if not extent.isGeosValid():
extent = None
crs = setting('user_extent_crs', None, str)
if crs:
crs = QgsCoordinateReferenceSystem(crs)
if not crs.isValid():
crs = None
mode = setting('analysis_extents_mode', HAZARD_EXPOSURE_VIEW)
if crs and extent and mode == HAZARD_EXPOSURE_BOUNDINGBOX:
self.extent.set_user_extent(extent, crs)
# It's better to set the show_rubber_bands after setting the user
# extent.
self.extent.show_rubber_bands = setting(
'showRubberBands', False, bool)
flag = setting('visibleLayersOnlyFlag', True, bool)
self.show_only_visible_layers_flag = flag
flag = setting('set_layer_from_title_flag', True, bool)
self.set_layer_from_title_flag = flag
self.zoom_to_impact_flag = setting('setZoomToImpactFlag', True, bool)
# whether exposure layer should be hidden after model completes
self.hide_exposure_flag = setting('setHideExposureFlag', False, bool)
# whether to show or not dev only options
self.developer_mode = setting('developer_mode', False, bool)
# If we use selected features only
flag = setting('useSelectedFeaturesOnly', True, bool)
self.use_selected_features_only = flag
# We need to re-trigger the aggregation combobox with the new flag.
index = self.aggregation_layer_combo.currentIndex()
self.aggregation_layer_combo.setCurrentIndex(index)
# whether to show or not a custom Logo
flag = setting('organisation_logo_path', supporters_logo_path(), str)
self.organisation_logo_path = flag
# Changed default to False for new users in 3.2 - see #2171
show_logos_flag = setting(
'showOrganisationLogoInDockFlag', False, bool)
# Flag to check valid organization logo
invalid_logo_size = False
logo_not_exist = False
if self.organisation_logo_path:
dock_width = float(self.width())
            # Don't let the image be more than 100px in height
maximum_height = 100.0 # px
pixmap = QPixmap(self.organisation_logo_path)
# it will throw Overflow Error if pixmap.height() == 0
if pixmap.height() > 0:
height_ratio = maximum_height / pixmap.height()
maximum_width = int(pixmap.width() * height_ratio)
# Don't let the image be more than the dock width wide
if maximum_width > dock_width:
width_ratio = dock_width / float(pixmap.width())
maximum_height = int(pixmap.height() * width_ratio)
maximum_width = dock_width
too_high = pixmap.height() > maximum_height
too_wide = pixmap.width() > dock_width
if too_wide or too_high:
pixmap = pixmap.scaled(
maximum_width, maximum_height, Qt.KeepAspectRatio)
self.organisation_logo.setMaximumWidth(maximum_width)
# We have manually scaled using logic above
self.organisation_logo.setScaledContents(False)
self.organisation_logo.setPixmap(pixmap)
else:
# handle zero pixmap height and or nonexistent files
if not os.path.exists(self.organisation_logo_path):
logo_not_exist = True
else:
invalid_logo_size = True
if (self.organisation_logo_path
and show_logos_flag
and not invalid_logo_size
and not logo_not_exist):
self._show_organisation_logo()
else:
self.organisation_logo.hide()
# RM: this is a fix for nonexistent organization logo or zero height
if logo_not_exist:
# noinspection PyCallByClass
QMessageBox.warning(
self, self.tr('InaSAFE %s' % self.inasafe_version),
self.tr(
                    'The file for organization logo in %s doesn\'t exist. '
'Please check in Plugins -> InaSAFE -> Options that your '
'paths are still correct and update them if needed.' %
self.organisation_logo_path
), QMessageBox.Ok)
if invalid_logo_size:
# noinspection PyCallByClass
QMessageBox.warning(
self,
self.tr('InaSAFE %s' % self.inasafe_version),
self.tr(
'The file for organization logo has zero height. Please '
'provide valid file for organization logo.'
), QMessageBox.Ok)
if logo_not_exist or invalid_logo_size:
set_setting('organisation_logo_path', supporters_logo_path()) | Set the dock state from QSettings.
Do this on init and after changing options in the options dialog. | Below is the the instruction that describes the task:
### Input:
Set the dock state from QSettings.
Do this on init and after changing options in the options dialog.
### Response:
def read_settings(self):
"""Set the dock state from QSettings.
Do this on init and after changing options in the options dialog.
"""
extent = setting('user_extent', None, str)
if extent:
extent = QgsGeometry.fromWkt(extent)
if not extent.isGeosValid():
extent = None
crs = setting('user_extent_crs', None, str)
if crs:
crs = QgsCoordinateReferenceSystem(crs)
if not crs.isValid():
crs = None
mode = setting('analysis_extents_mode', HAZARD_EXPOSURE_VIEW)
if crs and extent and mode == HAZARD_EXPOSURE_BOUNDINGBOX:
self.extent.set_user_extent(extent, crs)
# It's better to set the show_rubber_bands after setting the user
# extent.
self.extent.show_rubber_bands = setting(
'showRubberBands', False, bool)
flag = setting('visibleLayersOnlyFlag', True, bool)
self.show_only_visible_layers_flag = flag
flag = setting('set_layer_from_title_flag', True, bool)
self.set_layer_from_title_flag = flag
self.zoom_to_impact_flag = setting('setZoomToImpactFlag', True, bool)
# whether exposure layer should be hidden after model completes
self.hide_exposure_flag = setting('setHideExposureFlag', False, bool)
# whether to show or not dev only options
self.developer_mode = setting('developer_mode', False, bool)
# If we use selected features only
flag = setting('useSelectedFeaturesOnly', True, bool)
self.use_selected_features_only = flag
# We need to re-trigger the aggregation combobox with the new flag.
index = self.aggregation_layer_combo.currentIndex()
self.aggregation_layer_combo.setCurrentIndex(index)
# whether to show or not a custom Logo
flag = setting('organisation_logo_path', supporters_logo_path(), str)
self.organisation_logo_path = flag
# Changed default to False for new users in 3.2 - see #2171
show_logos_flag = setting(
'showOrganisationLogoInDockFlag', False, bool)
# Flag to check valid organization logo
invalid_logo_size = False
logo_not_exist = False
if self.organisation_logo_path:
dock_width = float(self.width())
# Don't let the image be more than 100px in height
maximum_height = 100.0 # px
pixmap = QPixmap(self.organisation_logo_path)
# it will throw Overflow Error if pixmap.height() == 0
if pixmap.height() > 0:
height_ratio = maximum_height / pixmap.height()
maximum_width = int(pixmap.width() * height_ratio)
# Don't let the image be more than the dock width wide
if maximum_width > dock_width:
width_ratio = dock_width / float(pixmap.width())
maximum_height = int(pixmap.height() * width_ratio)
maximum_width = dock_width
too_high = pixmap.height() > maximum_height
too_wide = pixmap.width() > dock_width
if too_wide or too_high:
pixmap = pixmap.scaled(
maximum_width, maximum_height, Qt.KeepAspectRatio)
self.organisation_logo.setMaximumWidth(maximum_width)
# We have manually scaled using logic above
self.organisation_logo.setScaledContents(False)
self.organisation_logo.setPixmap(pixmap)
else:
# handle zero pixmap height and/or nonexistent files
if not os.path.exists(self.organisation_logo_path):
logo_not_exist = True
else:
invalid_logo_size = True
if (self.organisation_logo_path
and show_logos_flag
and not invalid_logo_size
and not logo_not_exist):
self._show_organisation_logo()
else:
self.organisation_logo.hide()
# RM: this is a fix for nonexistent organization logo or zero height
if logo_not_exist:
# noinspection PyCallByClass
QMessageBox.warning(
self, self.tr('InaSAFE %s' % self.inasafe_version),
self.tr(
'The file for organization logo in %s doesn\'t exist. '
'Please check in Plugins -> InaSAFE -> Options that your '
'paths are still correct and update them if needed.' %
self.organisation_logo_path
), QMessageBox.Ok)
if invalid_logo_size:
# noinspection PyCallByClass
QMessageBox.warning(
self,
self.tr('InaSAFE %s' % self.inasafe_version),
self.tr(
'The file for organization logo has zero height. Please '
'provide valid file for organization logo.'
), QMessageBox.Ok)
if logo_not_exist or invalid_logo_size:
set_setting('organisation_logo_path', supporters_logo_path()) |
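For reference, the logo-scaling rule in read_settings boils down to the arithmetic below; the pixmap and dock sizes are invented and no Qt object is involved:

def scaled_logo_size(width, height, dock_width, max_height=100.0):
    # cap the height at 100 px first, then cap the width at the dock width
    height_ratio = max_height / height
    max_width = int(width * height_ratio)
    if max_width > dock_width:
        width_ratio = dock_width / float(width)
        max_height = int(height * width_ratio)
        max_width = dock_width
    return max_width, max_height

print(scaled_logo_size(800, 200, dock_width=300))  # -> (300, 75)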
def assign(self, bugids, user):
"""
Assign a bug to a user.
param bugid: ``int``, bug ID number.
param user: ``str``, the login name of the user to whom the bug is
assigned
returns: deferred that when fired returns True if the change succeeded,
False if the change was unnecessary (because the user is
already assigned.)
"""
payload = {'ids': (bugids,), 'assigned_to': user}
d = self.call('Bug.update', payload)
d.addCallback(self._parse_bug_assigned_callback)
return d | Assign a bug to a user.
param bugid: ``int``, bug ID number.
param user: ``str``, the login name of the user to whom the bug is
assigned
returns: deferred that when fired returns True if the change succeeded,
False if the change was unnecessary (because the user is
already assigned.) | Below is the the instruction that describes the task:
### Input:
Assign a bug to a user.
param bugid: ``int``, bug ID number.
param user: ``str``, the login name of the user to whom the bug is
assigned
returns: deferred that when fired returns True if the change succeeded,
False if the change was unnecessary (because the user is
already assigned.)
### Response:
def assign(self, bugids, user):
"""
Assign a bug to a user.
param bugid: ``int``, bug ID number.
param user: ``str``, the login name of the user to whom the bug is
assigned
returns: deferred that when fired returns True if the change succeeded,
False if the change was unnecessary (because the user is
already assigned.)
"""
payload = {'ids': (bugids,), 'assigned_to': user}
d = self.call('Bug.update', payload)
d.addCallback(self._parse_bug_assigned_callback)
return d |
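The assign wrapper above only builds a small Bug.update payload and hands the XML-RPC call to a deferred; a sketch of that payload, where the bug id and login are made up and nothing is actually sent:

payload = {'ids': (1234,), 'assigned_to': 'dev@example.com'}
# with a real proxy object the call would look roughly like:
#   d = proxy.call('Bug.update', payload)
#   d.addCallback(lambda changed: print('reassigned' if changed else 'already assigned'))
print(payload)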
def start(name=None,
user=None,
group=None,
chroot=None,
caps=None,
no_caps=False,
pidfile=None,
enable_core=False,
fd_limit=None,
verbose=False,
debug=False,
trace=False,
yydebug=False,
persist_file=None,
control=None,
worker_threads=None):
'''
Ensures, that syslog-ng is started via the given parameters. This function
is intended to be used from the state module.
Users shouldn't use this function, if the service module is available on
their system. If :mod:`syslog_ng.set_config_file
<salt.modules.syslog_ng.set_binary_path>`, is called before, this function
will use the set binary path.
CLI Example:
.. code-block:: bash
salt '*' syslog_ng.start
'''
params = []
_add_cli_param(params, 'user', user)
_add_cli_param(params, 'group', group)
_add_cli_param(params, 'chroot', chroot)
_add_cli_param(params, 'caps', caps)
_add_boolean_cli_param(params, 'no-caps', no_caps)
_add_cli_param(params, 'pidfile', pidfile)
_add_boolean_cli_param(params, 'enable-core', enable_core)
_add_cli_param(params, 'fd-limit', fd_limit)
_add_boolean_cli_param(params, 'verbose', verbose)
_add_boolean_cli_param(params, 'debug', debug)
_add_boolean_cli_param(params, 'trace', trace)
_add_boolean_cli_param(params, 'yydebug', yydebug)
_add_cli_param(params, 'cfgfile', __SYSLOG_NG_CONFIG_FILE)
_add_boolean_cli_param(params, 'persist-file', persist_file)
_add_cli_param(params, 'control', control)
_add_cli_param(params, 'worker-threads', worker_threads)
if __SYSLOG_NG_BINARY_PATH:
syslog_ng_binary = os.path.join(__SYSLOG_NG_BINARY_PATH, 'syslog-ng')
command = [syslog_ng_binary] + params
if __opts__.get('test', False):
comment = 'Syslog_ng state module will start {0}'.format(command)
return _format_state_result(name, result=None, comment=comment)
result = __salt__['cmd.run_all'](command, python_shell=False)
else:
command = ['syslog-ng'] + params
if __opts__.get('test', False):
comment = 'Syslog_ng state module will start {0}'.format(command)
return _format_state_result(name, result=None, comment=comment)
result = __salt__['cmd.run_all'](command, python_shell=False)
if result['pid'] > 0:
succ = True
else:
succ = False
return _format_state_result(
name, result=succ, changes={'new': ' '.join(command), 'old': ''}
) | Ensures, that syslog-ng is started via the given parameters. This function
is intended to be used from the state module.
Users shouldn't use this function, if the service module is available on
their system. If :mod:`syslog_ng.set_config_file
<salt.modules.syslog_ng.set_binary_path>`, is called before, this function
will use the set binary path.
CLI Example:
.. code-block:: bash
salt '*' syslog_ng.start | Below is the the instruction that describes the task:
### Input:
Ensures, that syslog-ng is started via the given parameters. This function
is intended to be used from the state module.
Users shouldn't use this function, if the service module is available on
their system. If :mod:`syslog_ng.set_config_file
<salt.modules.syslog_ng.set_binary_path>`, is called before, this function
will use the set binary path.
CLI Example:
.. code-block:: bash
salt '*' syslog_ng.start
### Response:
def start(name=None,
user=None,
group=None,
chroot=None,
caps=None,
no_caps=False,
pidfile=None,
enable_core=False,
fd_limit=None,
verbose=False,
debug=False,
trace=False,
yydebug=False,
persist_file=None,
control=None,
worker_threads=None):
'''
Ensures, that syslog-ng is started via the given parameters. This function
is intended to be used from the state module.
Users shouldn't use this function, if the service module is available on
their system. If :mod:`syslog_ng.set_config_file
<salt.modules.syslog_ng.set_binary_path>`, is called before, this function
will use the set binary path.
CLI Example:
.. code-block:: bash
salt '*' syslog_ng.start
'''
params = []
_add_cli_param(params, 'user', user)
_add_cli_param(params, 'group', group)
_add_cli_param(params, 'chroot', chroot)
_add_cli_param(params, 'caps', caps)
_add_boolean_cli_param(params, 'no-caps', no_caps)
_add_cli_param(params, 'pidfile', pidfile)
_add_boolean_cli_param(params, 'enable-core', enable_core)
_add_cli_param(params, 'fd-limit', fd_limit)
_add_boolean_cli_param(params, 'verbose', verbose)
_add_boolean_cli_param(params, 'debug', debug)
_add_boolean_cli_param(params, 'trace', trace)
_add_boolean_cli_param(params, 'yydebug', yydebug)
_add_cli_param(params, 'cfgfile', __SYSLOG_NG_CONFIG_FILE)
_add_boolean_cli_param(params, 'persist-file', persist_file)
_add_cli_param(params, 'control', control)
_add_cli_param(params, 'worker-threads', worker_threads)
if __SYSLOG_NG_BINARY_PATH:
syslog_ng_binary = os.path.join(__SYSLOG_NG_BINARY_PATH, 'syslog-ng')
command = [syslog_ng_binary] + params
if __opts__.get('test', False):
comment = 'Syslog_ng state module will start {0}'.format(command)
return _format_state_result(name, result=None, comment=comment)
result = __salt__['cmd.run_all'](command, python_shell=False)
else:
command = ['syslog-ng'] + params
if __opts__.get('test', False):
comment = 'Syslog_ng state module will start {0}'.format(command)
return _format_state_result(name, result=None, comment=comment)
result = __salt__['cmd.run_all'](command, python_shell=False)
if result['pid'] > 0:
succ = True
else:
succ = False
return _format_state_result(
name, result=succ, changes={'new': ' '.join(command), 'old': ''}
) |
def annotation(self, type, set=None):
"""Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found"""
l = self.count(type,set,True,default_ignore_annotations)
if len(l) >= 1:
return l[0]
else:
raise NoSuchAnnotation() | Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found | Below is the the instruction that describes the task:
### Input:
Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found
### Response:
def annotation(self, type, set=None):
"""Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found"""
l = self.count(type,set,True,default_ignore_annotations)
if len(l) >= 1:
return l[0]
else:
raise NoSuchAnnotation() |
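The behaviour documented above is a generic "first match or raise" pattern; a self-contained illustration with plain lists instead of FoLiA elements (the values are invented):

class NoSuchAnnotation(Exception):
    pass

def first_or_raise(matches):
    # return the first hit, mirroring annotation() above
    if len(matches) >= 1:
        return matches[0]
    raise NoSuchAnnotation()

print(first_or_raise(['pos-tag']))          # -> 'pos-tag'
try:
    first_or_raise([])
except NoSuchAnnotation:
    print('no annotation of that type/set')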
def createCollection(self, className = 'Collection', **colProperties) :
"""Creates a collection and returns it.
ClassName the name of a class inheriting from Collection or Edges, it can also be set to 'Collection' or 'Edges' in order to create untyped collections of documents or edges.
Use colProperties to put things such as 'waitForSync = True' (see ArangoDB's doc
for a full list of possible arguments). If a '_properties' dictionary is defined in the collection schema, arguments to this function override it"""
colClass = COL.getCollectionClass(className)
if len(colProperties) > 0 :
colProperties = dict(colProperties)
else :
try :
colProperties = dict(colClass._properties)
except AttributeError :
colProperties = {}
if className != 'Collection' and className != 'Edges' :
colProperties['name'] = className
else :
if 'name' not in colProperties :
raise ValueError("a 'name' argument must be supplied if you want to create a generic collection")
if colProperties['name'] in self.collections :
raise CreationError("Database %s already has a collection named %s" % (self.name, colProperties['name']) )
if issubclass(colClass, COL.Edges) or colClass.__class__ is COL.Edges:
colProperties["type"] = CONST.COLLECTION_EDGE_TYPE
else :
colProperties["type"] = CONST.COLLECTION_DOCUMENT_TYPE
payload = json.dumps(colProperties, default=str)
r = self.connection.session.post(self.collectionsURL, data = payload)
data = r.json()
if r.status_code == 200 and not data["error"] :
col = colClass(self, data)
self.collections[col.name] = col
return self.collections[col.name]
else :
raise CreationError(data["errorMessage"], data) | Creates a collection and returns it.
ClassName the name of a class inheriting from Collection or Edges, it can also be set to 'Collection' or 'Edges' in order to create untyped collections of documents or edges.
Use colProperties to put things such as 'waitForSync = True' (see ArangoDB's doc
for a full list of possible arguments). If a '_properties' dictionary is defined in the collection schema, arguments to this function override it | Below is the the instruction that describes the task:
### Input:
Creates a collection and returns it.
ClassName the name of a class inheriting from Collection or Edges, it can also be set to 'Collection' or 'Edges' in order to create untyped collections of documents or edges.
Use colProperties to put things such as 'waitForSync = True' (see ArangoDB's doc
for a full list of possible arguments). If a '_properties' dictionary is defined in the collection schema, arguments to this function override it
### Response:
def createCollection(self, className = 'Collection', **colProperties) :
"""Creates a collection and returns it.
ClassName the name of a class inheriting from Collection or Edges, it can also be set to 'Collection' or 'Edges' in order to create untyped collections of documents or edges.
Use colProperties to put things such as 'waitForSync = True' (see ArangoDB's doc
for a full list of possible arguments). If a '_properties' dictionary is defined in the collection schema, arguments to this function override it"""
colClass = COL.getCollectionClass(className)
if len(colProperties) > 0 :
colProperties = dict(colProperties)
else :
try :
colProperties = dict(colClass._properties)
except AttributeError :
colProperties = {}
if className != 'Collection' and className != 'Edges' :
colProperties['name'] = className
else :
if 'name' not in colProperties :
raise ValueError("a 'name' argument must be supplied if you want to create a generic collection")
if colProperties['name'] in self.collections :
raise CreationError("Database %s already has a collection named %s" % (self.name, colProperties['name']) )
if issubclass(colClass, COL.Edges) or colClass.__class__ is COL.Edges:
colProperties["type"] = CONST.COLLECTION_EDGE_TYPE
else :
colProperties["type"] = CONST.COLLECTION_DOCUMENT_TYPE
payload = json.dumps(colProperties, default=str)
r = self.connection.session.post(self.collectionsURL, data = payload)
data = r.json()
if r.status_code == 200 and not data["error"] :
col = colClass(self, data)
self.collections[col.name] = col
return self.collections[col.name]
else :
raise CreationError(data["errorMessage"], data) |
def visualRectRC(self, row, column):
"""The rectangle for the bounds of the item at *row*, *column*
:param row: row of the item
:type row: int
:param column: column of the item
:type column: int
:returns: :qtdoc:`QRect` -- rectangle of the borders of the item
"""
rect = self._rects[row][column]
if rect.isValid():
return QtCore.QRect(rect.x() - self.horizontalScrollBar().value(),
rect.y() - self.verticalScrollBar().value(),
rect.width(), rect.height())
else:
return rect | The rectangle for the bounds of the item at *row*, *column*
:param row: row of the item
:type row: int
:param column: column of the item
:type column: int
:returns: :qtdoc:`QRect` -- rectangle of the borders of the item | Below is the the instruction that describes the task:
### Input:
The rectangle for the bounds of the item at *row*, *column*
:param row: row of the item
:type row: int
:param column: column of the item
:type column: int
:returns: :qtdoc:`QRect` -- rectangle of the borders of the item
### Response:
def visualRectRC(self, row, column):
"""The rectangle for the bounds of the item at *row*, *column*
:param row: row of the item
:type row: int
:param column: column of the item
:type column: int
:returns: :qtdoc:`QRect` -- rectangle of the borders of the item
"""
rect = self._rects[row][column]
if rect.isValid():
return QtCore.QRect(rect.x() - self.horizontalScrollBar().value(),
rect.y() - self.verticalScrollBar().value(),
rect.width(), rect.height())
else:
return rect |
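visualRectRC simply shifts the stored content-coordinate rectangle by the scrollbar offsets; the same idea with plain tuples instead of QRect (numbers invented):

def visual_rect(stored_rect, h_scroll, v_scroll):
    x, y, w, h = stored_rect
    return (x - h_scroll, y - v_scroll, w, h)

print(visual_rect((120, 300, 50, 20), h_scroll=100, v_scroll=250))  # -> (20, 50, 50, 20)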
def triangle(self, verts=True, lines=True):
"""
Converts actor polygons and strips to triangles.
"""
tf = vtk.vtkTriangleFilter()
tf.SetPassLines(lines)
tf.SetPassVerts(verts)
tf.SetInputData(self.poly)
tf.Update()
return self.updateMesh(tf.GetOutput()) | Converts actor polygons and strips to triangles. | Below is the the instruction that describes the task:
### Input:
Converts actor polygons and strips to triangles.
### Response:
def triangle(self, verts=True, lines=True):
"""
Converts actor polygons and strips to triangles.
"""
tf = vtk.vtkTriangleFilter()
tf.SetPassLines(lines)
tf.SetPassVerts(verts)
tf.SetInputData(self.poly)
tf.Update()
return self.updateMesh(tf.GetOutput()) |
def makeOuputDir(outputDir, force):
"""
Create or check for an output directory.
@param outputDir: A C{str} output directory name, or C{None}.
@param force: If C{True}, allow overwriting of pre-existing files.
@return: The C{str} output directory name.
"""
if outputDir:
if exists(outputDir):
if not force:
print('Will not overwrite pre-existing files. Use --force to '
'make me.', file=sys.stderr)
sys.exit(1)
else:
mkdir(outputDir)
else:
outputDir = mkdtemp()
print('Writing output files to %s' % outputDir)
return outputDir | Create or check for an output directory.
@param outputDir: A C{str} output directory name, or C{None}.
@param force: If C{True}, allow overwriting of pre-existing files.
@return: The C{str} output directory name. | Below is the the instruction that describes the task:
### Input:
Create or check for an output directory.
@param outputDir: A C{str} output directory name, or C{None}.
@param force: If C{True}, allow overwriting of pre-existing files.
@return: The C{str} output directory name.
### Response:
def makeOuputDir(outputDir, force):
"""
Create or check for an output directory.
@param outputDir: A C{str} output directory name, or C{None}.
@param force: If C{True}, allow overwriting of pre-existing files.
@return: The C{str} output directory name.
"""
if outputDir:
if exists(outputDir):
if not force:
print('Will not overwrite pre-existing files. Use --force to '
'make me.', file=sys.stderr)
sys.exit(1)
else:
mkdir(outputDir)
else:
outputDir = mkdtemp()
print('Writing output files to %s' % outputDir)
return outputDir |
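A usage sketch for the helper above, assuming makeOuputDir and its imports are in scope: passing None falls back to a fresh temporary directory, while reusing an existing directory requires force=True (otherwise the function exits):

out = makeOuputDir(None, force=False)      # prints 'Writing output files to /tmp/...'
out_again = makeOuputDir(out, force=True)  # allowed only because force=True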
def imshow(
self,
data=None,
save=False,
ax=None,
interpolation="none",
extra_title=None,
show_resonances="some",
set_extent=True,
equalized=False,
rmin=None,
rmax=None,
savepath=".",
**kwargs,
):
"""Powerful default display.
show_resonances can be True, a list, 'all', or 'some'
"""
if data is None:
data = self.img
if self.resonance_axis is not None:
logger.debug("removing resonance_axis")
self.resonance_axis.remove()
if equalized:
data = np.nan_to_num(data)
data[data < 0] = 0
data = exposure.equalize_hist(data)
self.plotted_data = data
extent_val = self.extent if set_extent else None
min_, max_ = self.plot_limits
self.min_ = min_
self.max_ = max_
if ax is None:
if not _SEABORN_INSTALLED:
fig, ax = plt.subplots(figsize=calc_4_3(8))
else:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
with quantity_support():
im = ax.imshow(
data,
extent=extent_val,
cmap="gray",
vmin=min_,
vmax=max_,
interpolation=interpolation,
origin="lower",
aspect="auto",
**kwargs,
)
if any([rmin is not None, rmax is not None]):
ax.set_ylim(rmin, rmax)
self.mpl_im = im
ax.set_xlabel("Longitude [deg]")
ax.set_ylabel("Radius [Mm]")
ax.ticklabel_format(useOffset=False)
# ax.grid('on')
title = self.plot_title
if extra_title:
title += ", " + extra_title
ax.set_title(title, fontsize=12)
if show_resonances:
self.set_resonance_axis(ax, show_resonances, rmin, rmax)
if save:
savename = self.plotfname
if extra_title:
savename = savename[:-4] + "_" + extra_title + ".png"
p = Path(savename)
fullpath = Path(savepath) / p.name
fig.savefig(fullpath, dpi=150)
logging.info("Created %s", fullpath)
self.im = im
return im | Powerful default display.
show_resonances can be True, a list, 'all', or 'some' | Below is the the instruction that describes the task:
### Input:
Powerful default display.
show_resonances can be True, a list, 'all', or 'some'
### Response:
def imshow(
self,
data=None,
save=False,
ax=None,
interpolation="none",
extra_title=None,
show_resonances="some",
set_extent=True,
equalized=False,
rmin=None,
rmax=None,
savepath=".",
**kwargs,
):
"""Powerful default display.
show_resonances can be True, a list, 'all', or 'some'
"""
if data is None:
data = self.img
if self.resonance_axis is not None:
logger.debug("removing resonance_axis")
self.resonance_axis.remove()
if equalized:
data = np.nan_to_num(data)
data[data < 0] = 0
data = exposure.equalize_hist(data)
self.plotted_data = data
extent_val = self.extent if set_extent else None
min_, max_ = self.plot_limits
self.min_ = min_
self.max_ = max_
if ax is None:
if not _SEABORN_INSTALLED:
fig, ax = plt.subplots(figsize=calc_4_3(8))
else:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
with quantity_support():
im = ax.imshow(
data,
extent=extent_val,
cmap="gray",
vmin=min_,
vmax=max_,
interpolation=interpolation,
origin="lower",
aspect="auto",
**kwargs,
)
if any([rmin is not None, rmax is not None]):
ax.set_ylim(rmin, rmax)
self.mpl_im = im
ax.set_xlabel("Longitude [deg]")
ax.set_ylabel("Radius [Mm]")
ax.ticklabel_format(useOffset=False)
# ax.grid('on')
title = self.plot_title
if extra_title:
title += ", " + extra_title
ax.set_title(title, fontsize=12)
if show_resonances:
self.set_resonance_axis(ax, show_resonances, rmin, rmax)
if save:
savename = self.plotfname
if extra_title:
savename = savename[:-4] + "_" + extra_title + ".png"
p = Path(savename)
fullpath = Path(savepath) / p.name
fig.savefig(fullpath, dpi=150)
logging.info("Created %s", fullpath)
self.im = im
return im |
def refresh(name):
'''
Initiate a Traffic Server configuration file reread. Use this command to
update the running configuration after any configuration file modification.
The timestamp of the last reconfiguration event (in seconds since epoch) is
published in the proxy.node.config.reconfigure_time metric.
.. code-block:: yaml
refresh_ats:
trafficserver.refresh
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if __opts__['test']:
ret['comment'] = 'Refreshing local node configuration'
return ret
__salt__['trafficserver.refresh']()
ret['result'] = True
ret['comment'] = 'Refreshed local node configuration'
return ret | Initiate a Traffic Server configuration file reread. Use this command to
update the running configuration after any configuration file modification.
The timestamp of the last reconfiguration event (in seconds since epoch) is
published in the proxy.node.config.reconfigure_time metric.
.. code-block:: yaml
refresh_ats:
trafficserver.refresh | Below is the the instruction that describes the task:
### Input:
Initiate a Traffic Server configuration file reread. Use this command to
update the running configuration after any configuration file modification.
The timestamp of the last reconfiguration event (in seconds since epoch) is
published in the proxy.node.config.reconfigure_time metric.
.. code-block:: yaml
refresh_ats:
trafficserver.refresh
### Response:
def refresh(name):
'''
Initiate a Traffic Server configuration file reread. Use this command to
update the running configuration after any configuration file modification.
The timestamp of the last reconfiguration event (in seconds since epoch) is
published in the proxy.node.config.reconfigure_time metric.
.. code-block:: yaml
refresh_ats:
trafficserver.refresh
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if __opts__['test']:
ret['comment'] = 'Refreshing local node configuration'
return ret
__salt__['trafficserver.refresh']()
ret['result'] = True
ret['comment'] = 'Refreshed local node configuration'
return ret |
def min_pulse_sp(self):
"""
Used to set the pulse size in milliseconds for the signal that tells the
servo to drive to the minimum (counter-clockwise) position_sp. Default value
is 600. Valid values are 300 to 700. You must write to the position_sp
attribute for changes to this attribute to take effect.
"""
self._min_pulse_sp, value = self.get_attr_int(self._min_pulse_sp, 'min_pulse_sp')
return value | Used to set the pulse size in milliseconds for the signal that tells the
servo to drive to the minimum (counter-clockwise) position_sp. Default value
is 600. Valid values are 300 to 700. You must write to the position_sp
attribute for changes to this attribute to take effect. | Below is the the instruction that describes the task:
### Input:
Used to set the pulse size in milliseconds for the signal that tells the
servo to drive to the minimum (counter-clockwise) position_sp. Default value
is 600. Valid values are 300 to 700. You must write to the position_sp
attribute for changes to this attribute to take effect.
### Response:
def min_pulse_sp(self):
"""
Used to set the pulse size in milliseconds for the signal that tells the
servo to drive to the minimum (counter-clockwise) position_sp. Default value
is 600. Valid values are 300 to 700. You must write to the position_sp
attribute for changes to this attribute to take effect.
"""
self._min_pulse_sp, value = self.get_attr_int(self._min_pulse_sp, 'min_pulse_sp')
return value |
def _get_bufsize_linux(iface):
'''
Return network interface buffer information using ethtool
'''
ret = {'result': False}
cmd = '/sbin/ethtool -g {0}'.format(iface)
out = __salt__['cmd.run'](cmd)
pat = re.compile(r'^(.+):\s+(\d+)$')
suffix = 'max-'
for line in out.splitlines():
res = pat.match(line)
if res:
ret[res.group(1).lower().replace(' ', '-') + suffix] = int(res.group(2))
ret['result'] = True
elif line.endswith('maximums:'):
suffix = '-max'
elif line.endswith('settings:'):
suffix = ''
if not ret['result']:
parts = out.split()
# remove shell cmd prefix from msg
if parts[0].endswith('sh:'):
out = ' '.join(parts[1:])
ret['comment'] = out
return ret | Return network interface buffer information using ethtool | Below is the the instruction that describes the task:
### Input:
Return network interface buffer information using ethtool
### Response:
def _get_bufsize_linux(iface):
'''
Return network interface buffer information using ethtool
'''
ret = {'result': False}
cmd = '/sbin/ethtool -g {0}'.format(iface)
out = __salt__['cmd.run'](cmd)
pat = re.compile(r'^(.+):\s+(\d+)$')
suffix = 'max-'
for line in out.splitlines():
res = pat.match(line)
if res:
ret[res.group(1).lower().replace(' ', '-') + suffix] = int(res.group(2))
ret['result'] = True
elif line.endswith('maximums:'):
suffix = '-max'
elif line.endswith('settings:'):
suffix = ''
if not ret['result']:
parts = out.split()
# remove shell cmd prefix from msg
if parts[0].endswith('sh:'):
out = ' '.join(parts[1:])
ret['comment'] = out
return ret |
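The same parsing logic can be exercised against a canned 'ethtool -g' style output instead of shelling out; the ring sizes below are made up, and the run shows how the section headers flip the key suffix:

import re

sample = """Ring parameters for eth0:
Pre-set maximums:
RX:             4096
TX:             4096
Current hardware settings:
RX:             512
TX:             512"""

ret = {'result': False}
pat = re.compile(r'^(.+):\s+(\d+)$')
suffix = 'max-'
for line in sample.splitlines():
    res = pat.match(line)
    if res:
        ret[res.group(1).lower().replace(' ', '-') + suffix] = int(res.group(2))
        ret['result'] = True
    elif line.endswith('maximums:'):
        suffix = '-max'
    elif line.endswith('settings:'):
        suffix = ''
print(ret)  # -> {'result': True, 'rx-max': 4096, 'tx-max': 4096, 'rx': 512, 'tx': 512}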
def format_text_as_docstr(text):
r"""
CommandLine:
python ~/local/vim/rc/pyvim_funcs.py --test-format_text_as_docstr
Example:
>>> # DISABLE_DOCTEST
>>> from pyvim_funcs import * # NOQA
>>> text = testdata_text()
>>> formated_text = format_text_as_docstr(text)
>>> result = ('formated_text = \n%s' % (str(formated_text),))
>>> print(result)
"""
import utool as ut
import re
min_indent = ut.get_minimum_indentation(text)
indent_ = ' ' * min_indent
formated_text = re.sub('^' + indent_, '' + indent_ + '>>> ', text,
flags=re.MULTILINE)
formated_text = re.sub('^$', '' + indent_ + '>>> #', formated_text,
flags=re.MULTILINE)
return formated_text | r"""
CommandLine:
python ~/local/vim/rc/pyvim_funcs.py --test-format_text_as_docstr
Example:
>>> # DISABLE_DOCTEST
>>> from pyvim_funcs import * # NOQA
>>> text = testdata_text()
>>> formated_text = format_text_as_docstr(text)
>>> result = ('formated_text = \n%s' % (str(formated_text),))
>>> print(result) | Below is the the instruction that describes the task:
### Input:
r"""
CommandLine:
python ~/local/vim/rc/pyvim_funcs.py --test-format_text_as_docstr
Example:
>>> # DISABLE_DOCTEST
>>> from pyvim_funcs import * # NOQA
>>> text = testdata_text()
>>> formated_text = format_text_as_docstr(text)
>>> result = ('formated_text = \n%s' % (str(formated_text),))
>>> print(result)
### Response:
def format_text_as_docstr(text):
r"""
CommandLine:
python ~/local/vim/rc/pyvim_funcs.py --test-format_text_as_docstr
Example:
>>> # DISABLE_DOCTEST
>>> from pyvim_funcs import * # NOQA
>>> text = testdata_text()
>>> formated_text = format_text_as_docstr(text)
>>> result = ('formated_text = \n%s' % (str(formated_text),))
>>> print(result)
"""
import utool as ut
import re
min_indent = ut.get_minimum_indentation(text)
indent_ = ' ' * min_indent
formated_text = re.sub('^' + indent_, '' + indent_ + '>>> ', text,
flags=re.MULTILINE)
formated_text = re.sub('^$', '' + indent_ + '>>> #', formated_text,
flags=re.MULTILINE)
return formated_text |
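Without the utool helper for measuring indentation, the core of the transformation above is just two multiline re.sub calls; a small runnable sample with an invented, four-space-indented block:

import re

text = "    x = 1\n\n    print(x + 1)"
indent = '    '  # minimum indentation of the sample block
formatted = re.sub('^' + indent, indent + '>>> ', text, flags=re.MULTILINE)
formatted = re.sub('^$', indent + '>>> #', formatted, flags=re.MULTILINE)
print(formatted)
# ->    >>> x = 1
#       >>> #
#       >>> print(x + 1)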
def calc_abort(request, calc_id):
"""
Abort the given calculation, if it is running
"""
job = logs.dbcmd('get_job', calc_id)
if job is None:
message = {'error': 'Unknown job %s' % calc_id}
return HttpResponse(content=json.dumps(message), content_type=JSON)
if job.status not in ('submitted', 'executing'):
message = {'error': 'Job %s is not running' % job.id}
return HttpResponse(content=json.dumps(message), content_type=JSON)
if not utils.user_has_permission(request, job.user_name):
message = {'error': ('User %s has no permission to abort job %s' %
(job.user_name, job.id))}
return HttpResponse(content=json.dumps(message), content_type=JSON,
status=403)
if job.pid: # is a spawned job
try:
os.kill(job.pid, signal.SIGTERM)
except Exception as exc:
logging.error(exc)
else:
logging.warning('Aborting job %d, pid=%d', job.id, job.pid)
logs.dbcmd('set_status', job.id, 'aborted')
message = {'success': 'Killing job %d' % job.id}
return HttpResponse(content=json.dumps(message), content_type=JSON)
message = {'error': 'PID for job %s not found' % job.id}
return HttpResponse(content=json.dumps(message), content_type=JSON) | Abort the given calculation, if it is running | Below is the the instruction that describes the task:
### Input:
Abort the given calculation, if it is running
### Response:
def calc_abort(request, calc_id):
"""
Abort the given calculation, if it is running
"""
job = logs.dbcmd('get_job', calc_id)
if job is None:
message = {'error': 'Unknown job %s' % calc_id}
return HttpResponse(content=json.dumps(message), content_type=JSON)
if job.status not in ('submitted', 'executing'):
message = {'error': 'Job %s is not running' % job.id}
return HttpResponse(content=json.dumps(message), content_type=JSON)
if not utils.user_has_permission(request, job.user_name):
message = {'error': ('User %s has no permission to abort job %s' %
(job.user_name, job.id))}
return HttpResponse(content=json.dumps(message), content_type=JSON,
status=403)
if job.pid: # is a spawned job
try:
os.kill(job.pid, signal.SIGTERM)
except Exception as exc:
logging.error(exc)
else:
logging.warning('Aborting job %d, pid=%d', job.id, job.pid)
logs.dbcmd('set_status', job.id, 'aborted')
message = {'success': 'Killing job %d' % job.id}
return HttpResponse(content=json.dumps(message), content_type=JSON)
message = {'error': 'PID for job %s not found' % job.id}
return HttpResponse(content=json.dumps(message), content_type=JSON) |
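Stripped of Django, the database layer and signal handling, the decision logic in calc_abort is a chain of early returns; a distilled runnable sketch in which the job dict and the permission flag are stand-ins for the real objects:

def abort_response(job, user_may_abort):
    if job is None:
        return 200, {'error': 'Unknown job'}
    if job['status'] not in ('submitted', 'executing'):
        return 200, {'error': 'Job %s is not running' % job['id']}
    if not user_may_abort:
        return 403, {'error': 'no permission to abort job %s' % job['id']}
    return 200, {'success': 'Killing job %d' % job['id']}

print(abort_response({'id': 7, 'status': 'executing'}, True))  # -> (200, {'success': 'Killing job 7'})
print(abort_response({'id': 7, 'status': 'complete'}, True))   # -> (200, {'error': 'Job 7 is not running'})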
def select_action(self, nb_actions, probs):
"""Return the selected action
# Arguments
probs (np.ndarray) : Probability for each action
# Returns
action
"""
action = np.random.choice(range(nb_actions), p=probs)
return action | Return the selected action
# Arguments
probs (np.ndarray) : Probability for each action
# Returns
action | Below is the the instruction that describes the task:
### Input:
Return the selected action
# Arguments
probs (np.ndarray) : Probability for each action
# Returns
action
### Response:
def select_action(self, nb_actions, probs):
"""Return the selected action
# Arguments
probs (np.ndarray) : Probability for each action
# Returns
action
"""
action = np.random.choice(range(nb_actions), p=probs)
return action |
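A quick runnable check of the probability-weighted draw used above; the probabilities are illustrative:

import numpy as np

probs = np.array([0.1, 0.2, 0.7])
action = np.random.choice(range(len(probs)), p=probs)
print(action)  # index 2 comes up roughly 70% of the time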
def run(self):
"""Render and display Python package documentation.
"""
os.environ['JARN_RUN'] = '1'
self.python.check_valid_python()
args = self.parse_options(self.args)
if args:
arg = args[0]
else:
arg = os.curdir
if arg:
arg = expanduser(arg)
if isfile(arg):
outfile = self.render_file(arg)
elif isdir(arg):
outfile = self.render_long_description(arg)
else:
err_exit('No such file or directory: %s' % arg)
self.open_in_browser(outfile) | Render and display Python package documentation. | Below is the the instruction that describes the task:
### Input:
Render and display Python package documentation.
### Response:
def run(self):
"""Render and display Python package documentation.
"""
os.environ['JARN_RUN'] = '1'
self.python.check_valid_python()
args = self.parse_options(self.args)
if args:
arg = args[0]
else:
arg = os.curdir
if arg:
arg = expanduser(arg)
if isfile(arg):
outfile = self.render_file(arg)
elif isdir(arg):
outfile = self.render_long_description(arg)
else:
err_exit('No such file or directory: %s' % arg)
self.open_in_browser(outfile) |
def send_article_message(self, user_id, articles=None, media_id=None):
"""
Send an article (news) message.
For details, see http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html
:param user_id: user ID, i.e. the source of the WechatMessage you received
:param articles: a list where each element is a dict with keys `title`, `description`, `picurl`, `url`
:param media_id: Media ID of the article message to send
:return: the returned JSON data
"""
# neither 'articles' nor 'media_id' is specified
if articles is None and media_id is None:
raise TypeError('must provide one parameter in "articles" and "media_id"')
# articles specified
if articles:
articles_data = []
for article in articles:
article = Article(**article)
articles_data.append({
'title': article.title,
'description': article.description,
'url': article.url,
'picurl': article.picurl,
})
return self.request.post(
url='https://api.weixin.qq.com/cgi-bin/message/custom/send',
data={
'touser': user_id,
'msgtype': 'news',
'news': {
'articles': articles_data,
},
}
)
# media_id specified
return self.request.post(
url='https://api.weixin.qq.com/cgi-bin/message/custom/send',
data={
'touser': user_id,
'msgtype': 'mpnews',
'mpnews': {
'media_id': media_id,
},
}
) | Send an article (news) message.
For details, see http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html
:param user_id: user ID, i.e. the source of the WechatMessage you received
:param articles: a list where each element is a dict with keys `title`, `description`, `picurl`, `url`
:param media_id: Media ID of the article message to send
:return: the returned JSON data | Below is the the instruction that describes the task:
### Input:
Send an article (news) message.
For details, see http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html
:param user_id: user ID, i.e. the source of the WechatMessage you received
:param articles: a list where each element is a dict with keys `title`, `description`, `picurl`, `url`
:param media_id: Media ID of the article message to send
:return: the returned JSON data
### Response:
def send_article_message(self, user_id, articles=None, media_id=None):
"""
Send an article (news) message.
For details, see http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html
:param user_id: user ID, i.e. the source of the WechatMessage you received
:param articles: a list where each element is a dict with keys `title`, `description`, `picurl`, `url`
:param media_id: Media ID of the article message to send
:return: the returned JSON data
"""
# neither 'articles' nor 'media_id' is specified
if articles is None and media_id is None:
raise TypeError('must provide one parameter in "articles" and "media_id"')
# articles specified
if articles:
articles_data = []
for article in articles:
article = Article(**article)
articles_data.append({
'title': article.title,
'description': article.description,
'url': article.url,
'picurl': article.picurl,
})
return self.request.post(
url='https://api.weixin.qq.com/cgi-bin/message/custom/send',
data={
'touser': user_id,
'msgtype': 'news',
'news': {
'articles': articles_data,
},
}
)
# media_id specified
return self.request.post(
url='https://api.weixin.qq.com/cgi-bin/message/custom/send',
data={
'touser': user_id,
'msgtype': 'mpnews',
'mpnews': {
'media_id': media_id,
},
}
) |
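For the 'articles' branch above, the body posted to the custom-send endpoint ends up shaped like the dict below; the OpenID and article fields are placeholders and no HTTP request is made here:

import json

articles = [{
    'title': 'Hello',
    'description': 'demo article',
    'url': 'http://example.com',
    'picurl': 'http://example.com/cover.png',
}]
body = {
    'touser': 'OPENID_PLACEHOLDER',
    'msgtype': 'news',
    'news': {'articles': articles},
}
print(json.dumps(body, ensure_ascii=False, indent=2))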
def use_plenary_sequence_rule_view(self):
"""Pass through to provider SequenceRuleLookupSession.use_plenary_sequence_rule_view"""
self._object_views['sequence_rule'] = PLENARY
# self._get_provider_session('sequence_rule_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_sequence_rule_view()
except AttributeError:
pass | Pass through to provider SequenceRuleLookupSession.use_plenary_sequence_rule_view | Below is the the instruction that describes the task:
### Input:
Pass through to provider SequenceRuleLookupSession.use_plenary_sequence_rule_view
### Response:
def use_plenary_sequence_rule_view(self):
"""Pass through to provider SequenceRuleLookupSession.use_plenary_sequence_rule_view"""
self._object_views['sequence_rule'] = PLENARY
# self._get_provider_session('sequence_rule_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_sequence_rule_view()
except AttributeError:
pass |
def sync(self, json_obj=None):
"""
synchronize this transport with the Ariane server transport
:return:
"""
LOGGER.debug("Transport.sync")
if json_obj is None:
params = None
if self.id is not None:
params = SessionService.complete_transactional_req({'ID': self.id})
if params is not None:
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params['OPERATION'] = 'getTransport'
args = {'properties': params}
else:
args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
response = TransportService.requester.call(args)
if MappingService.driver_type != DriverFactory.DRIVER_REST:
response = response.get()
if response.rc == 0:
json_obj = response.response_content
else:
err_msg = 'Transport.sync - Problem while syncing transport (id: ' + str(self.id) + '). ' \
'Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) + \
" (" + str(response.rc) + ")"
LOGGER.warning(err_msg)
if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
raise ArianeMappingOverloadError("Transport.sync",
ArianeMappingOverloadError.ERROR_MSG)
# traceback.print_stack()
elif 'transportID' not in json_obj:
err_msg = 'Transport.sync - Problem while syncing transport (id: ' + str(self.id) + '). ' \
'Reason: inconsistent json_obj' + str(json_obj) + " from : \n"
LOGGER.warning(err_msg)
# traceback.print_stack()
if json_obj is not None:
self.id = json_obj['transportID']
self.name = json_obj['transportName']
if MappingService.driver_type != DriverFactory.DRIVER_REST:
if 'transportProperties' in json_obj:
self.properties = DriverTools.json2properties(json_obj['transportProperties'])
else:
self.properties = None
else:
self.properties = json_obj['transportProperties'] if 'transportProperties' in json_obj else None | synchronize this transport with the Ariane server transport
:return: | Below is the the instruction that describes the task:
### Input:
synchronize this transport with the Ariane server transport
:return:
### Response:
def sync(self, json_obj=None):
"""
synchronize this transport with the Ariane server transport
:return:
"""
LOGGER.debug("Transport.sync")
if json_obj is None:
params = None
if self.id is not None:
params = SessionService.complete_transactional_req({'ID': self.id})
if params is not None:
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params['OPERATION'] = 'getTransport'
args = {'properties': params}
else:
args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
response = TransportService.requester.call(args)
if MappingService.driver_type != DriverFactory.DRIVER_REST:
response = response.get()
if response.rc == 0:
json_obj = response.response_content
else:
err_msg = 'Transport.sync - Problem while syncing transport (id: ' + str(self.id) + '). ' \
'Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) + \
" (" + str(response.rc) + ")"
LOGGER.warning(err_msg)
if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
raise ArianeMappingOverloadError("Transport.sync",
ArianeMappingOverloadError.ERROR_MSG)
# traceback.print_stack()
elif 'transportID' not in json_obj:
err_msg = 'Transport.sync - Problem while syncing transport (id: ' + str(self.id) + '). ' \
'Reason: inconsistent json_obj' + str(json_obj) + " from : \n"
LOGGER.warning(err_msg)
# traceback.print_stack()
if json_obj is not None:
self.id = json_obj['transportID']
self.name = json_obj['transportName']
if MappingService.driver_type != DriverFactory.DRIVER_REST:
if 'transportProperties' in json_obj:
self.properties = DriverTools.json2properties(json_obj['transportProperties'])
else:
self.properties = None
else:
self.properties = json_obj['transportProperties'] if 'transportProperties' in json_obj else None |
def delete_guest(userid):
""" Destroy a virtual machine.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8
"""
# Check if the guest exists.
guest_list_info = client.send_request('guest_list')
# the string 'userid' needs to be coded as u'userid' in case of a py2 interpreter.
userid_1 = (unicode(userid, "utf-8") if sys.version[0] == '2' else userid)
if userid_1 not in guest_list_info['output']:
raise RuntimeError("Userid %s does not exist!" % userid)
# Delete the guest.
guest_delete_info = client.send_request('guest_delete', userid)
if guest_delete_info['overallRC']:
print("\nFailed to delete guest %s!" % userid)
else:
print("\nSucceeded to delete guest %s!" % userid) | Destroy a virtual machine.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8 | Below is the the instruction that describes the task:
### Input:
Destroy a virtual machine.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8
### Response:
def delete_guest(userid):
""" Destroy a virtual machine.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8
"""
# Check if the guest exists.
guest_list_info = client.send_request('guest_list')
# the string 'userid' needs to be coded as u'userid' in case of a py2 interpreter.
userid_1 = (unicode(userid, "utf-8") if sys.version[0] == '2' else userid)
if userid_1 not in guest_list_info['output']:
raise RuntimeError("Userid %s does not exist!" % userid)
# Delete the guest.
guest_delete_info = client.send_request('guest_delete', userid)
if guest_delete_info['overallRC']:
print("\nFailed to delete guest %s!" % userid)
else:
print("\nSucceeded to delete guest %s!" % userid) |
def shutdown(self, restart=False):
"""Request an immediate kernel shutdown.
Upon receipt of the (empty) reply, client code can safely assume that
the kernel has shut down and it's safe to forcefully terminate it if
it's still alive.
The kernel will send the reply via a function registered with Python's
atexit module, ensuring it's truly done as the kernel is done with all
normal operation.
"""
# Send quit message to kernel. Once we implement kernel-side setattr,
# this should probably be done that way, but for now this will do.
msg = self.session.msg('shutdown_request', {'restart':restart})
self._queue_send(msg)
return msg['header']['msg_id'] | Request an immediate kernel shutdown.
Upon receipt of the (empty) reply, client code can safely assume that
the kernel has shut down and it's safe to forcefully terminate it if
it's still alive.
The kernel will send the reply via a function registered with Python's
atexit module, ensuring it's truly done as the kernel is done with all
normal operation. | Below is the the instruction that describes the task:
### Input:
Request an immediate kernel shutdown.
Upon receipt of the (empty) reply, client code can safely assume that
the kernel has shut down and it's safe to forcefully terminate it if
it's still alive.
The kernel will send the reply via a function registered with Python's
atexit module, ensuring it's truly done as the kernel is done with all
normal operation.
### Response:
def shutdown(self, restart=False):
"""Request an immediate kernel shutdown.
Upon receipt of the (empty) reply, client code can safely assume that
the kernel has shut down and it's safe to forcefully terminate it if
it's still alive.
The kernel will send the reply via a function registered with Python's
atexit module, ensuring it's truly done as the kernel is done with all
normal operation.
"""
# Send quit message to kernel. Once we implement kernel-side setattr,
# this should probably be done that way, but for now this will do.
msg = self.session.msg('shutdown_request', {'restart':restart})
self._queue_send(msg)
return msg['header']['msg_id'] |
def fetch_protein_list(self, taxon_id):
"""
Fetch a list of proteins for a species in biomart
:param taxid:
:return: list
"""
protein_list = list()
# col = self.columns['ensembl_biomart']
col = ['ensembl_peptide_id', ]
params = urllib.parse.urlencode(
{'query': self._build_biomart_gene_query(taxon_id, col)})
conn = http.client.HTTPConnection(ENS_URL)
conn.request("GET", '/biomart/martservice?' + params)
response = conn.getresponse()
for line in response:
line = line.decode('utf-8').rstrip()
row = line.split('\t')
if len(row) != len(col):
LOG.warning("Data error for p-list query on %d", taxon_id)
continue
protein_list.append(row[col.index('ensembl_peptide_id')])
conn.close()
return protein_list | Fetch a list of proteins for a species in biomart
:param taxid:
:return: list | Below is the the instruction that describes the task:
### Input:
Fetch a list of proteins for a species in biomart
:param taxid:
:return: list
### Response:
def fetch_protein_list(self, taxon_id):
"""
Fetch a list of proteins for a species in biomart
:param taxid:
:return: list
"""
protein_list = list()
# col = self.columns['ensembl_biomart']
col = ['ensembl_peptide_id', ]
params = urllib.parse.urlencode(
{'query': self._build_biomart_gene_query(taxon_id, col)})
conn = http.client.HTTPConnection(ENS_URL)
conn.request("GET", '/biomart/martservice?' + params)
response = conn.getresponse()
for line in response:
line = line.decode('utf-8').rstrip()
row = line.split('\t')
if len(row) != len(col):
LOG.warning("Data error for p-list query on %d", taxon_id)
continue
protein_list.append(row[col.index('ensembl_peptide_id')])
conn.close()
return protein_list |
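The biomart call above is a plain GET against /biomart/martservice with the XML query URL-encoded into a single 'query' parameter; a sketch of the URL assembly, where the XML string is a placeholder because _build_biomart_gene_query is not shown:

import urllib.parse

xml_query = '<Query virtualSchemaName="default">...</Query>'  # placeholder
params = urllib.parse.urlencode({'query': xml_query})
print('/biomart/martservice?' + params)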
def unfold_lines(string):
'''Join lines that are wrapped.
Any line that starts with a space or tab is joined to the previous
line.
'''
assert isinstance(string, str), 'Expect str. Got {}'.format(type(string))
lines = string.splitlines()
line_buffer = io.StringIO()
for line_number in range(len(lines)):
line = lines[line_number]
if line and line[0:1] in (' ', '\t'):
line_buffer.write(' ')
elif line_number != 0:
line_buffer.write('\r\n')
line_buffer.write(line.strip())
line_buffer.write('\r\n')
return line_buffer.getvalue() | Join lines that are wrapped.
Any line that starts with a space or tab is joined to the previous
line. | Below is the the instruction that describes the task:
### Input:
Join lines that are wrapped.
Any line that starts with a space or tab is joined to the previous
line.
### Response:
def unfold_lines(string):
'''Join lines that are wrapped.
Any line that starts with a space or tab is joined to the previous
line.
'''
assert isinstance(string, str), 'Expect str. Got {}'.format(type(string))
lines = string.splitlines()
line_buffer = io.StringIO()
for line_number in range(len(lines)):
line = lines[line_number]
if line and line[0:1] in (' ', '\t'):
line_buffer.write(' ')
elif line_number != 0:
line_buffer.write('\r\n')
line_buffer.write(line.strip())
line_buffer.write('\r\n')
return line_buffer.getvalue() |
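A runnable check of the unfolding behaviour, assuming unfold_lines above is in scope; the header values are made up:

folded = "Subject: a very long\r\n subject line\r\nX-Test: 1\r\n"
print(repr(unfold_lines(folded)))
# -> 'Subject: a very long subject line\r\nX-Test: 1\r\n'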
def collect_loaded_packages() -> List[Tuple[str, str]]:
"""
Return the currently loaded package names and their versions.
"""
dists = get_installed_distributions()
get_dist_files = DistFilesFinder()
file_table = {}
for dist in dists:
for file in get_dist_files(dist):
file_table[file] = dist
used_dists = set()
# we greedily load all values to a list to avoid weird
# "dictionary changed size during iteration" errors
for module in list(sys.modules.values()):
try:
dist = file_table[module.__file__]
except (AttributeError, KeyError):
continue
used_dists.add(dist)
return sorted((dist.project_name, dist.version) for dist in used_dists) | Return the currently loaded package names and their versions. | Below is the the instruction that describes the task:
### Input:
Return the currently loaded package names and their versions.
### Response:
def collect_loaded_packages() -> List[Tuple[str, str]]:
"""
Return the currently loaded package names and their versions.
"""
dists = get_installed_distributions()
get_dist_files = DistFilesFinder()
file_table = {}
for dist in dists:
for file in get_dist_files(dist):
file_table[file] = dist
used_dists = set()
# we greedily load all values to a list to avoid weird
# "dictionary changed size during iteration" errors
for module in list(sys.modules.values()):
try:
dist = file_table[module.__file__]
except (AttributeError, KeyError):
continue
used_dists.add(dist)
return sorted((dist.project_name, dist.version) for dist in used_dists) |
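Usage is a single call, assuming the function above and its pip helpers (get_installed_distributions, DistFilesFinder) are importable in the running environment:

for name, version in collect_loaded_packages():
    print('%s==%s' % (name, version))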
def load_code_info(phases_or_groups):
"""Recursively load code info for a PhaseGroup or list of phases or groups."""
if isinstance(phases_or_groups, PhaseGroup):
return phases_or_groups.load_code_info()
ret = []
for phase in phases_or_groups:
if isinstance(phase, PhaseGroup):
ret.append(phase.load_code_info())
else:
ret.append(
mutablerecords.CopyRecord(
phase, code_info=test_record.CodeInfo.for_function(phase.func)))
return ret | Recursively load code info for a PhaseGroup or list of phases or groups. | Below is the the instruction that describes the task:
### Input:
Recursively load code info for a PhaseGroup or list of phases or groups.
### Response:
def load_code_info(phases_or_groups):
"""Recursively load code info for a PhaseGroup or list of phases or groups."""
if isinstance(phases_or_groups, PhaseGroup):
return phases_or_groups.load_code_info()
ret = []
for phase in phases_or_groups:
if isinstance(phase, PhaseGroup):
ret.append(phase.load_code_info())
else:
ret.append(
mutablerecords.CopyRecord(
phase, code_info=test_record.CodeInfo.for_function(phase.func)))
return ret |
def compute_trip_stats(
feed: "Feed",
route_ids: Optional[List[str]] = None,
*,
compute_dist_from_shapes: bool = False,
) -> DataFrame:
"""
Return a DataFrame with the following columns:
- ``'trip_id'``
- ``'route_id'``
- ``'route_short_name'``
- ``'route_type'``
- ``'direction_id'``: NaN if missing from feed
- ``'shape_id'``: NaN if missing from feed
- ``'num_stops'``: number of stops on trip
- ``'start_time'``: first departure time of the trip
- ``'end_time'``: last departure time of the trip
- ``'start_stop_id'``: stop ID of the first stop of the trip
- ``'end_stop_id'``: stop ID of the last stop of the trip
- ``'is_loop'``: 1 if the start and end stop are less than 400m apart and
0 otherwise
- ``'distance'``: distance of the trip in ``feed.dist_units``;
contains all ``np.nan`` entries if ``feed.shapes is None``
- ``'duration'``: duration of the trip in hours
- ``'speed'``: distance/duration
If ``feed.stop_times`` has a ``shape_dist_traveled`` column with at
least one non-NaN value and ``compute_dist_from_shapes == False``,
then use that column to compute the distance column.
Else if ``feed.shapes is not None``, then compute the distance
column using the shapes and Shapely.
Otherwise, set the distances to NaN.
If route IDs are given, then restrict to trips on those routes.
Notes
-----
- Assume the following feed attributes are not ``None``:
* ``feed.trips``
* ``feed.routes``
* ``feed.stop_times``
* ``feed.shapes`` (optionally)
* Those used in :func:`.stops.build_geometry_by_stop`
- Calculating trip distances with ``compute_dist_from_shapes=True``
seems pretty accurate. For example, calculating trip distances on
`this Portland feed
<https://transitfeeds.com/p/trimet/43/1400947517>`_
using ``compute_dist_from_shapes=False`` and
``compute_dist_from_shapes=True``,
yields a difference of at most 0.83km from the original values.
"""
f = feed.trips.copy()
# Restrict to given route IDs
if route_ids is not None:
f = f[f["route_id"].isin(route_ids)].copy()
# Merge with stop times and extra trip info.
# Convert departure times to seconds past midnight to
# compute trip durations later.
if "direction_id" not in f.columns:
f["direction_id"] = np.nan
if "shape_id" not in f.columns:
f["shape_id"] = np.nan
f = (
f[["route_id", "trip_id", "direction_id", "shape_id"]]
.merge(feed.routes[["route_id", "route_short_name", "route_type"]])
.merge(feed.stop_times)
.sort_values(["trip_id", "stop_sequence"])
.assign(
departure_time=lambda x: x["departure_time"].map(
hp.timestr_to_seconds
)
)
)
# Compute all trips stats except distance,
# which is possibly more involved
geometry_by_stop = feed.build_geometry_by_stop(use_utm=True)
g = f.groupby("trip_id")
def my_agg(group):
d = OrderedDict()
d["route_id"] = group["route_id"].iat[0]
d["route_short_name"] = group["route_short_name"].iat[0]
d["route_type"] = group["route_type"].iat[0]
d["direction_id"] = group["direction_id"].iat[0]
d["shape_id"] = group["shape_id"].iat[0]
d["num_stops"] = group.shape[0]
d["start_time"] = group["departure_time"].iat[0]
d["end_time"] = group["departure_time"].iat[-1]
d["start_stop_id"] = group["stop_id"].iat[0]
d["end_stop_id"] = group["stop_id"].iat[-1]
dist = geometry_by_stop[d["start_stop_id"]].distance(
geometry_by_stop[d["end_stop_id"]]
)
d["is_loop"] = int(dist < 400)
d["duration"] = (d["end_time"] - d["start_time"]) / 3600
return pd.Series(d)
# Apply my_agg, but don't reset index yet.
# Need trip ID as index to line up the results of the
# forthcoming distance calculation
h = g.apply(my_agg)
# Compute distance
if (
hp.is_not_null(f, "shape_dist_traveled")
and not compute_dist_from_shapes
):
# Compute distances using shape_dist_traveled column
h["distance"] = g.apply(
lambda group: group["shape_dist_traveled"].max()
)
elif feed.shapes is not None:
# Compute distances using the shapes and Shapely
geometry_by_shape = feed.build_geometry_by_shape(use_utm=True)
geometry_by_stop = feed.build_geometry_by_stop(use_utm=True)
m_to_dist = hp.get_convert_dist("m", feed.dist_units)
def compute_dist(group):
"""
Return the distance traveled along the trip between the
first and last stops.
If that distance is negative or if the trip's linestring
intersects itself, then return the length of the trip's
linestring instead.
"""
shape = group["shape_id"].iat[0]
try:
# Get the linestring for this trip
linestring = geometry_by_shape[shape]
except KeyError:
# Shape ID is NaN or doesn't exist in shapes.
# No can do.
return np.nan
# If the linestring intersects itself, then that can cause
# errors in the computation below, so just
# return the length of the linestring as a good approximation
D = linestring.length
if not linestring.is_simple:
return D
# Otherwise, return the difference of the distances along
# the linestring of the first and last stop
start_stop = group["stop_id"].iat[0]
end_stop = group["stop_id"].iat[-1]
try:
start_point = geometry_by_stop[start_stop]
end_point = geometry_by_stop[end_stop]
except KeyError:
# One of the two stop IDs is NaN, so just
# return the length of the linestring
return D
d1 = linestring.project(start_point)
d2 = linestring.project(end_point)
d = d2 - d1
if 0 < d < D + 100:
return d
else:
# Something is probably wrong, so just
# return the length of the linestring
return D
h["distance"] = g.apply(compute_dist)
# Convert from meters
h["distance"] = h["distance"].map(m_to_dist)
else:
h["distance"] = np.nan
# Reset index and compute final stats
h = h.reset_index()
h["speed"] = h["distance"] / h["duration"]
h[["start_time", "end_time"]] = h[["start_time", "end_time"]].applymap(
lambda x: hp.timestr_to_seconds(x, inverse=True)
)
return h.sort_values(["route_id", "direction_id", "start_time"]) | Return a DataFrame with the following columns:
- ``'trip_id'``
- ``'route_id'``
- ``'route_short_name'``
- ``'route_type'``
- ``'direction_id'``: NaN if missing from feed
- ``'shape_id'``: NaN if missing from feed
- ``'num_stops'``: number of stops on trip
- ``'start_time'``: first departure time of the trip
- ``'end_time'``: last departure time of the trip
- ``'start_stop_id'``: stop ID of the first stop of the trip
- ``'end_stop_id'``: stop ID of the last stop of the trip
- ``'is_loop'``: 1 if the start and end stop are less than 400m apart and
0 otherwise
- ``'distance'``: distance of the trip in ``feed.dist_units``;
contains all ``np.nan`` entries if ``feed.shapes is None``
- ``'duration'``: duration of the trip in hours
- ``'speed'``: distance/duration
If ``feed.stop_times`` has a ``shape_dist_traveled`` column with at
least one non-NaN value and ``compute_dist_from_shapes == False``,
then use that column to compute the distance column.
Else if ``feed.shapes is not None``, then compute the distance
column using the shapes and Shapely.
Otherwise, set the distances to NaN.
If route IDs are given, then restrict to trips on those routes.
Notes
-----
- Assume the following feed attributes are not ``None``:
* ``feed.trips``
* ``feed.routes``
* ``feed.stop_times``
* ``feed.shapes`` (optionally)
* Those used in :func:`.stops.build_geometry_by_stop`
- Calculating trip distances with ``compute_dist_from_shapes=True``
seems pretty accurate. For example, calculating trip distances on
`this Portland feed
<https://transitfeeds.com/p/trimet/43/1400947517>`_
using ``compute_dist_from_shapes=False`` and
``compute_dist_from_shapes=True``,
yields a difference of at most 0.83km from the original values. | Below is the instruction that describes the task:
### Input:
Return a DataFrame with the following columns:
- ``'trip_id'``
- ``'route_id'``
- ``'route_short_name'``
- ``'route_type'``
- ``'direction_id'``: NaN if missing from feed
- ``'shape_id'``: NaN if missing from feed
- ``'num_stops'``: number of stops on trip
- ``'start_time'``: first departure time of the trip
- ``'end_time'``: last departure time of the trip
- ``'start_stop_id'``: stop ID of the first stop of the trip
- ``'end_stop_id'``: stop ID of the last stop of the trip
- ``'is_loop'``: 1 if the start and end stop are less than 400m apart and
0 otherwise
- ``'distance'``: distance of the trip in ``feed.dist_units``;
contains all ``np.nan`` entries if ``feed.shapes is None``
- ``'duration'``: duration of the trip in hours
- ``'speed'``: distance/duration
If ``feed.stop_times`` has a ``shape_dist_traveled`` column with at
least one non-NaN value and ``compute_dist_from_shapes == False``,
then use that column to compute the distance column.
Else if ``feed.shapes is not None``, then compute the distance
column using the shapes and Shapely.
Otherwise, set the distances to NaN.
If route IDs are given, then restrict to trips on those routes.
Notes
-----
- Assume the following feed attributes are not ``None``:
* ``feed.trips``
* ``feed.routes``
* ``feed.stop_times``
* ``feed.shapes`` (optionally)
* Those used in :func:`.stops.build_geometry_by_stop`
- Calculating trip distances with ``compute_dist_from_shapes=True``
seems pretty accurate. For example, calculating trip distances on
`this Portland feed
<https://transitfeeds.com/p/trimet/43/1400947517>`_
using ``compute_dist_from_shapes=False`` and
``compute_dist_from_shapes=True``,
yields a difference of at most 0.83km from the original values.
### Response:
def compute_trip_stats(
feed: "Feed",
route_ids: Optional[List[str]] = None,
*,
compute_dist_from_shapes: bool = False,
) -> DataFrame:
"""
Return a DataFrame with the following columns:
- ``'trip_id'``
- ``'route_id'``
- ``'route_short_name'``
- ``'route_type'``
- ``'direction_id'``: NaN if missing from feed
- ``'shape_id'``: NaN if missing from feed
- ``'num_stops'``: number of stops on trip
- ``'start_time'``: first departure time of the trip
- ``'end_time'``: last departure time of the trip
- ``'start_stop_id'``: stop ID of the first stop of the trip
- ``'end_stop_id'``: stop ID of the last stop of the trip
- ``'is_loop'``: 1 if the start and end stop are less than 400m apart and
0 otherwise
- ``'distance'``: distance of the trip in ``feed.dist_units``;
contains all ``np.nan`` entries if ``feed.shapes is None``
- ``'duration'``: duration of the trip in hours
- ``'speed'``: distance/duration
If ``feed.stop_times`` has a ``shape_dist_traveled`` column with at
least one non-NaN value and ``compute_dist_from_shapes == False``,
then use that column to compute the distance column.
Else if ``feed.shapes is not None``, then compute the distance
column using the shapes and Shapely.
Otherwise, set the distances to NaN.
If route IDs are given, then restrict to trips on those routes.
Notes
-----
- Assume the following feed attributes are not ``None``:
* ``feed.trips``
* ``feed.routes``
* ``feed.stop_times``
* ``feed.shapes`` (optionally)
* Those used in :func:`.stops.build_geometry_by_stop`
- Calculating trip distances with ``compute_dist_from_shapes=True``
seems pretty accurate. For example, calculating trip distances on
`this Portland feed
<https://transitfeeds.com/p/trimet/43/1400947517>`_
using ``compute_dist_from_shapes=False`` and
``compute_dist_from_shapes=True``,
yields a difference of at most 0.83km from the original values.
"""
f = feed.trips.copy()
# Restrict to given route IDs
if route_ids is not None:
f = f[f["route_id"].isin(route_ids)].copy()
# Merge with stop times and extra trip info.
# Convert departure times to seconds past midnight to
# compute trip durations later.
if "direction_id" not in f.columns:
f["direction_id"] = np.nan
if "shape_id" not in f.columns:
f["shape_id"] = np.nan
f = (
f[["route_id", "trip_id", "direction_id", "shape_id"]]
.merge(feed.routes[["route_id", "route_short_name", "route_type"]])
.merge(feed.stop_times)
.sort_values(["trip_id", "stop_sequence"])
.assign(
departure_time=lambda x: x["departure_time"].map(
hp.timestr_to_seconds
)
)
)
# Compute all trips stats except distance,
# which is possibly more involved
geometry_by_stop = feed.build_geometry_by_stop(use_utm=True)
g = f.groupby("trip_id")
def my_agg(group):
d = OrderedDict()
d["route_id"] = group["route_id"].iat[0]
d["route_short_name"] = group["route_short_name"].iat[0]
d["route_type"] = group["route_type"].iat[0]
d["direction_id"] = group["direction_id"].iat[0]
d["shape_id"] = group["shape_id"].iat[0]
d["num_stops"] = group.shape[0]
d["start_time"] = group["departure_time"].iat[0]
d["end_time"] = group["departure_time"].iat[-1]
d["start_stop_id"] = group["stop_id"].iat[0]
d["end_stop_id"] = group["stop_id"].iat[-1]
dist = geometry_by_stop[d["start_stop_id"]].distance(
geometry_by_stop[d["end_stop_id"]]
)
d["is_loop"] = int(dist < 400)
d["duration"] = (d["end_time"] - d["start_time"]) / 3600
return pd.Series(d)
# Apply my_agg, but don't reset index yet.
# Need trip ID as index to line up the results of the
# forthcoming distance calculation
h = g.apply(my_agg)
# Compute distance
if (
hp.is_not_null(f, "shape_dist_traveled")
and not compute_dist_from_shapes
):
# Compute distances using shape_dist_traveled column
h["distance"] = g.apply(
lambda group: group["shape_dist_traveled"].max()
)
elif feed.shapes is not None:
# Compute distances using the shapes and Shapely
geometry_by_shape = feed.build_geometry_by_shape(use_utm=True)
geometry_by_stop = feed.build_geometry_by_stop(use_utm=True)
m_to_dist = hp.get_convert_dist("m", feed.dist_units)
def compute_dist(group):
"""
Return the distance traveled along the trip between the
first and last stops.
If that distance is negative or if the trip's linestring
            intersects itself, then return the length of the trip's
linestring instead.
"""
shape = group["shape_id"].iat[0]
try:
# Get the linestring for this trip
linestring = geometry_by_shape[shape]
except KeyError:
# Shape ID is NaN or doesn't exist in shapes.
# No can do.
return np.nan
            # If the linestring intersects itself, then that can cause
# errors in the computation below, so just
# return the length of the linestring as a good approximation
D = linestring.length
if not linestring.is_simple:
return D
# Otherwise, return the difference of the distances along
# the linestring of the first and last stop
start_stop = group["stop_id"].iat[0]
end_stop = group["stop_id"].iat[-1]
try:
start_point = geometry_by_stop[start_stop]
end_point = geometry_by_stop[end_stop]
except KeyError:
# One of the two stop IDs is NaN, so just
# return the length of the linestring
return D
d1 = linestring.project(start_point)
d2 = linestring.project(end_point)
d = d2 - d1
if 0 < d < D + 100:
return d
else:
# Something is probably wrong, so just
# return the length of the linestring
return D
h["distance"] = g.apply(compute_dist)
# Convert from meters
h["distance"] = h["distance"].map(m_to_dist)
else:
h["distance"] = np.nan
# Reset index and compute final stats
h = h.reset_index()
h["speed"] = h["distance"] / h["duration"]
h[["start_time", "end_time"]] = h[["start_time", "end_time"]].applymap(
lambda x: hp.timestr_to_seconds(x, inverse=True)
)
return h.sort_values(["route_id", "direction_id", "start_time"]) |
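A minimal usage sketch for the trip-stats function above. It assumes the function is exposed as a Feed method of gtfs_kit and that a GTFS zip is available; the file name and units are assumptions, not part of the original record.
# Illustrative sketch only -- feed path, units, and method binding are assumptions.
import gtfs_kit as gk

feed = gk.read_feed("portland_gtfs.zip", dist_units="km")   # hypothetical GTFS file
trip_stats = feed.compute_trip_stats(compute_dist_from_shapes=True)
print(trip_stats[["trip_id", "distance", "duration", "speed", "is_loop"]].head())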
def date_time_between_dates(
self,
datetime_start=None,
datetime_end=None,
tzinfo=None):
"""
Takes two DateTime objects and returns a random datetime between the two
given datetimes.
Accepts DateTime objects.
:param datetime_start: DateTime
:param datetime_end: DateTime
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1999-02-02 11:42:52')
:return DateTime
"""
if datetime_start is None:
datetime_start = datetime.now(tzinfo)
if datetime_end is None:
datetime_end = datetime.now(tzinfo)
timestamp = self.generator.random.randint(
datetime_to_timestamp(datetime_start),
datetime_to_timestamp(datetime_end),
)
try:
if tzinfo is None:
pick = datetime.fromtimestamp(timestamp, tzlocal())
pick = pick.astimezone(tzutc()).replace(tzinfo=None)
else:
pick = datetime.fromtimestamp(timestamp, tzinfo)
except OverflowError:
raise OverflowError(
"You specified an end date with a timestamp bigger than the maximum allowed on this"
" system. Please specify an earlier date.",
)
return pick | Takes two DateTime objects and returns a random datetime between the two
given datetimes.
Accepts DateTime objects.
:param datetime_start: DateTime
:param datetime_end: DateTime
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1999-02-02 11:42:52')
        :return DateTime | Below is the instruction that describes the task:
### Input:
Takes two DateTime objects and returns a random datetime between the two
given datetimes.
Accepts DateTime objects.
:param datetime_start: DateTime
:param datetime_end: DateTime
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1999-02-02 11:42:52')
:return DateTime
### Response:
def date_time_between_dates(
self,
datetime_start=None,
datetime_end=None,
tzinfo=None):
"""
Takes two DateTime objects and returns a random datetime between the two
given datetimes.
Accepts DateTime objects.
:param datetime_start: DateTime
:param datetime_end: DateTime
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1999-02-02 11:42:52')
:return DateTime
"""
if datetime_start is None:
datetime_start = datetime.now(tzinfo)
if datetime_end is None:
datetime_end = datetime.now(tzinfo)
timestamp = self.generator.random.randint(
datetime_to_timestamp(datetime_start),
datetime_to_timestamp(datetime_end),
)
try:
if tzinfo is None:
pick = datetime.fromtimestamp(timestamp, tzlocal())
pick = pick.astimezone(tzutc()).replace(tzinfo=None)
else:
pick = datetime.fromtimestamp(timestamp, tzinfo)
except OverflowError:
raise OverflowError(
"You specified an end date with a timestamp bigger than the maximum allowed on this"
" system. Please specify an earlier date.",
)
return pick |
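A short usage sketch for the provider method above, assuming it is exposed on a Faker instance as in the faker library; the date bounds are arbitrary.
# Illustrative sketch; the bounds below are arbitrary.
from datetime import datetime
from faker import Faker

fake = Faker()
start = datetime(1999, 1, 1)
end = datetime(1999, 12, 31)
print(fake.date_time_between_dates(datetime_start=start, datetime_end=end))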
def setVisibleColumns(self, visible):
"""
Sets the list of visible columns for this widget. This method will
        take any column in this tree's list NOT found within the inputted column
list and hide them.
:param columns | [<str>, ..]
"""
colnames = self.columns()
for c, column in enumerate(colnames):
self.setColumnHidden(c, column not in visible) | Sets the list of visible columns for this widget. This method will
        take any column in this tree's list NOT found within the inputted column
list and hide them.
        :param columns | [<str>, ..] | Below is the instruction that describes the task:
### Input:
Sets the list of visible columns for this widget. This method will
        take any column in this tree's list NOT found within the inputted column
list and hide them.
:param columns | [<str>, ..]
### Response:
def setVisibleColumns(self, visible):
"""
Sets the list of visible columns for this widget. This method will
        take any column in this tree's list NOT found within the inputted column
list and hide them.
:param columns | [<str>, ..]
"""
colnames = self.columns()
for c, column in enumerate(colnames):
self.setColumnHidden(c, column not in visible) |
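A hypothetical usage sketch for the method above: 'tree' stands for an instance of the tree widget class it belongs to, and the column names are made up.
# Hypothetical: 'tree' is an instance of the widget class defining setVisibleColumns.
tree.setVisibleColumns(['Name', 'Status'])   # any other defined column gets hidden
tree.setVisibleColumns(tree.columns())       # show every column again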
def _integration(data, sample_rate):
"""
Moving window integration. N is the number of samples in the width of the integration
window
----------
Parameters
----------
data : ndarray
Samples of the signal where a moving window integration will be applied.
sample_rate : int
Sampling rate at which the acquisition took place.
Returns
-------
out : ndarray
Integrated signal samples.
"""
wind_size = int(0.080 * sample_rate)
int_ecg = numpy.zeros_like(data)
cum_sum = data.cumsum()
int_ecg[wind_size:] = (cum_sum[wind_size:] - cum_sum[:-wind_size]) / wind_size
int_ecg[:wind_size] = cum_sum[:wind_size] / numpy.arange(1, wind_size + 1)
return int_ecg | Moving window integration. N is the number of samples in the width of the integration
window
----------
Parameters
----------
data : ndarray
Samples of the signal where a moving window integration will be applied.
sample_rate : int
Sampling rate at which the acquisition took place.
Returns
-------
out : ndarray
        Integrated signal samples. | Below is the instruction that describes the task:
### Input:
Moving window integration. N is the number of samples in the width of the integration
window
----------
Parameters
----------
data : ndarray
Samples of the signal where a moving window integration will be applied.
sample_rate : int
Sampling rate at which the acquisition took place.
Returns
-------
out : ndarray
Integrated signal samples.
### Response:
def _integration(data, sample_rate):
"""
Moving window integration. N is the number of samples in the width of the integration
window
----------
Parameters
----------
data : ndarray
Samples of the signal where a moving window integration will be applied.
sample_rate : int
Sampling rate at which the acquisition took place.
Returns
-------
out : ndarray
Integrated signal samples.
"""
wind_size = int(0.080 * sample_rate)
int_ecg = numpy.zeros_like(data)
cum_sum = data.cumsum()
int_ecg[wind_size:] = (cum_sum[wind_size:] - cum_sum[:-wind_size]) / wind_size
int_ecg[:wind_size] = cum_sum[:wind_size] / numpy.arange(1, wind_size + 1)
return int_ecg |
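A small self-contained check of the moving-window integration above using a synthetic signal; the sampling rate is an assumption, so the 0.080 s window spans 80 samples here.
# Self-contained sketch with a synthetic signal (sample rate is an assumption).
import numpy

fs = 1000                                              # assumed sampling rate, Hz
t = numpy.arange(0, 2, 1 / fs)
signal = numpy.abs(numpy.sin(2 * numpy.pi * t))        # stand-in for a squared/filtered ECG
integrated = _integration(signal, fs)                  # window of int(0.080 * fs) = 80 samples
print(integrated.shape, integrated[:5])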
def list_storage_class(self, **kwargs):
"""
list or watch objects of kind StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_storage_class(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StorageClassList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_storage_class_with_http_info(**kwargs)
else:
(data) = self.list_storage_class_with_http_info(**kwargs)
return data | list or watch objects of kind StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_storage_class(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StorageClassList
If the method is called asynchronously,
        returns the request thread. | Below is the instruction that describes the task:
### Input:
list or watch objects of kind StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_storage_class(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StorageClassList
If the method is called asynchronously,
returns the request thread.
### Response:
def list_storage_class(self, **kwargs):
"""
list or watch objects of kind StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_storage_class(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StorageClassList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_storage_class_with_http_info(**kwargs)
else:
(data) = self.list_storage_class_with_http_info(**kwargs)
return data |
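A usage sketch assuming the standard kubernetes Python client, where this generated method lives on StorageV1Api; a reachable cluster and kubeconfig are assumed.
# Sketch assuming the official kubernetes Python client and a reachable cluster.
from kubernetes import client, config

config.load_kube_config()                  # or config.load_incluster_config()
api = client.StorageV1Api()
sc_list = api.list_storage_class(watch=False)
for sc in sc_list.items:
    print(sc.metadata.name)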
def register( cls, plugin ):
"""
Registers a particular plugin to the global system at the given name.
:param plugin | <XWizardPlugin>
"""
if ( not plugin ):
return
if ( cls._plugins is None ):
cls._plugins = {}
cls._plugins[plugin.uniqueName()] = plugin | Registers a particular plugin to the global system at the given name.
        :param plugin | <XWizardPlugin> | Below is the instruction that describes the task:
### Input:
Registers a particular plugin to the global system at the given name.
:param plugin | <XWizardPlugin>
### Response:
def register( cls, plugin ):
"""
Registers a particular plugin to the global system at the given name.
:param plugin | <XWizardPlugin>
"""
if ( not plugin ):
return
if ( cls._plugins is None ):
cls._plugins = {}
cls._plugins[plugin.uniqueName()] = plugin |
def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Load Logical Partition (requires classic mode)."""
assert wait_for_completion is True # async not supported yet
lpar_oid = uri_parms[0]
lpar_uri = '/api/logical-partitions/' + lpar_oid
try:
lpar = hmc.lookup_by_uri(lpar_uri)
except KeyError:
raise InvalidResourceError(method, uri)
cpc = lpar.manager.parent
assert not cpc.dpm_enabled
status = lpar.properties.get('status', None)
force = body.get('force', False) if body else False
clear_indicator = body.get('clear-indicator', True) if body else True
store_status_indicator = body.get('store-status-indicator',
False) if body else False
if status == 'not-activated':
raise ConflictError(method, uri, reason=0,
message="LPAR {!r} could not be loaded "
"because the LPAR is in status {}.".
format(lpar.name, status))
elif status == 'operating' and not force:
raise ServerError(method, uri, reason=263,
message="LPAR {!r} could not be loaded "
"because the LPAR is already loaded "
"(and force was not specified).".
format(lpar.name))
load_address = body.get('load-address', None) if body else None
if not load_address:
# Starting with z14, this parameter is optional and a last-used
# property is available.
load_address = lpar.properties.get('last-used-load-address', None)
if load_address is None:
# TODO: Verify actual error for this case on a z14.
raise BadRequestError(method, uri, reason=5,
message="LPAR {!r} could not be loaded "
"because a load address is not specified "
"in the request or in the Lpar last-used "
"property".
format(lpar.name))
load_parameter = body.get('load-parameter', None) if body else None
if not load_parameter:
# Starting with z14, a last-used property is available.
load_parameter = lpar.properties.get(
'last-used-load-parameter', None)
if load_parameter is None:
load_parameter = ''
# Reflect the load in the resource
if clear_indicator:
lpar.properties['memory'] = ''
if store_status_indicator:
lpar.properties['stored-status'] = status
else:
lpar.properties['stored-status'] = None
lpar.properties['status'] = LparLoadHandler.get_status()
lpar.properties['last-used-load-address'] = load_address
        lpar.properties['last-used-load-parameter'] = load_parameter | Operation: Load Logical Partition (requires classic mode). | Below is the instruction that describes the task:
### Input:
Operation: Load Logical Partition (requires classic mode).
### Response:
def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Load Logical Partition (requires classic mode)."""
assert wait_for_completion is True # async not supported yet
lpar_oid = uri_parms[0]
lpar_uri = '/api/logical-partitions/' + lpar_oid
try:
lpar = hmc.lookup_by_uri(lpar_uri)
except KeyError:
raise InvalidResourceError(method, uri)
cpc = lpar.manager.parent
assert not cpc.dpm_enabled
status = lpar.properties.get('status', None)
force = body.get('force', False) if body else False
clear_indicator = body.get('clear-indicator', True) if body else True
store_status_indicator = body.get('store-status-indicator',
False) if body else False
if status == 'not-activated':
raise ConflictError(method, uri, reason=0,
message="LPAR {!r} could not be loaded "
"because the LPAR is in status {}.".
format(lpar.name, status))
elif status == 'operating' and not force:
raise ServerError(method, uri, reason=263,
message="LPAR {!r} could not be loaded "
"because the LPAR is already loaded "
"(and force was not specified).".
format(lpar.name))
load_address = body.get('load-address', None) if body else None
if not load_address:
# Starting with z14, this parameter is optional and a last-used
# property is available.
load_address = lpar.properties.get('last-used-load-address', None)
if load_address is None:
# TODO: Verify actual error for this case on a z14.
raise BadRequestError(method, uri, reason=5,
message="LPAR {!r} could not be loaded "
"because a load address is not specified "
"in the request or in the Lpar last-used "
"property".
format(lpar.name))
load_parameter = body.get('load-parameter', None) if body else None
if not load_parameter:
# Starting with z14, a last-used property is available.
load_parameter = lpar.properties.get(
'last-used-load-parameter', None)
if load_parameter is None:
load_parameter = ''
# Reflect the load in the resource
if clear_indicator:
lpar.properties['memory'] = ''
if store_status_indicator:
lpar.properties['stored-status'] = status
else:
lpar.properties['stored-status'] = None
lpar.properties['status'] = LparLoadHandler.get_status()
lpar.properties['last-used-load-address'] = load_address
lpar.properties['last-used-load-parameter'] = load_parameter |
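The handler above only reads a handful of fields from the request body; a hypothetical body exercising the force-load path could look like the following (every value is made up).
# Hypothetical request body for the Load operation; all values are assumptions.
body = {
    "load-address": "5cc1",           # falls back to 'last-used-load-address' if omitted
    "load-parameter": "",             # falls back to 'last-used-load-parameter' if omitted
    "force": True,                    # required when the LPAR is already 'operating'
    "clear-indicator": True,          # clears LPAR memory before the load
    "store-status-indicator": False,  # if True, the current status is stored first
}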
def load_clients_file(filename, configuration_class=ClientConfiguration):
"""
Loads client configurations from a YAML file.
:param filename: YAML file name.
:type filename: unicode | str
:param configuration_class: Class of the configuration object to create.
:type configuration_class: class
:return: A dictionary of client configuration objects.
:rtype: dict[unicode | str, dockermap.map.config.client.ClientConfiguration]
"""
with open(filename, 'r') as f:
return load_clients(f, configuration_class=configuration_class) | Loads client configurations from a YAML file.
:param filename: YAML file name.
:type filename: unicode | str
:param configuration_class: Class of the configuration object to create.
:type configuration_class: class
:return: A dictionary of client configuration objects.
    :rtype: dict[unicode | str, dockermap.map.config.client.ClientConfiguration] | Below is the instruction that describes the task:
### Input:
Loads client configurations from a YAML file.
:param filename: YAML file name.
:type filename: unicode | str
:param configuration_class: Class of the configuration object to create.
:type configuration_class: class
:return: A dictionary of client configuration objects.
:rtype: dict[unicode | str, dockermap.map.config.client.ClientConfiguration]
### Response:
def load_clients_file(filename, configuration_class=ClientConfiguration):
"""
Loads client configurations from a YAML file.
:param filename: YAML file name.
:type filename: unicode | str
:param configuration_class: Class of the configuration object to create.
:type configuration_class: class
:return: A dictionary of client configuration objects.
:rtype: dict[unicode | str, dockermap.map.config.client.ClientConfiguration]
"""
with open(filename, 'r') as f:
return load_clients(f, configuration_class=configuration_class) |
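A short usage sketch for the loader above; the YAML file name is an assumption.
# Sketch: 'clients.yaml' is a hypothetical file of client definitions.
clients = load_clients_file('clients.yaml')
for name, cfg in clients.items():
    print(name, type(cfg).__name__)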
def search(self, **kwargs):
"""Searches for files/folders
Args:
\*\*kwargs (dict): A dictionary containing necessary parameters
(check https://developers.box.com/docs/#search for
list of parameters)
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
query_string = {}
for key, value in kwargs.iteritems():
query_string[key] = value
return self.__request("GET","search",querystring=query_string) | Searches for files/folders
Args:
\*\*kwargs (dict): A dictionary containing necessary parameters
(check https://developers.box.com/docs/#search for
list of parameters)
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem. | Below is the instruction that describes the task:
### Input:
Searches for files/folders
Args:
\*\*kwargs (dict): A dictionary containing necessary parameters
(check https://developers.box.com/docs/#search for
list of parameters)
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
### Response:
def search(self, **kwargs):
"""Searches for files/folders
Args:
\*\*kwargs (dict): A dictionary containing necessary parameters
(check https://developers.box.com/docs/#search for
list of parameters)
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
query_string = {}
for key, value in kwargs.iteritems():
query_string[key] = value
return self.__request("GET","search",querystring=query_string) |
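A hedged usage sketch: 'box' stands for an already-authenticated instance of the wrapper class this method belongs to, and the keyword arguments mirror the Box search API parameters named in the docstring.
# Hypothetical: 'box' is an authenticated instance of the wrapper class above.
results = box.search(query='invoice', type='file', limit=10)
for entry in results.get('entries', []):
    print(entry.get('name'))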
def TerminalSize():
"""Returns terminal length and width as a tuple."""
try:
with open(os.ctermid(), 'r') as tty_instance:
length_width = struct.unpack(
'hh', fcntl.ioctl(tty_instance.fileno(), termios.TIOCGWINSZ, '1234'))
except (IOError, OSError):
try:
length_width = (int(os.environ['LINES']),
int(os.environ['COLUMNS']))
except (ValueError, KeyError):
length_width = (24, 80)
  return length_width | Returns terminal length and width as a tuple. | Below is the instruction that describes the task:
### Input:
Returns terminal length and width as a tuple.
### Response:
def TerminalSize():
"""Returns terminal length and width as a tuple."""
try:
with open(os.ctermid(), 'r') as tty_instance:
length_width = struct.unpack(
'hh', fcntl.ioctl(tty_instance.fileno(), termios.TIOCGWINSZ, '1234'))
except (IOError, OSError):
try:
length_width = (int(os.environ['LINES']),
int(os.environ['COLUMNS']))
except (ValueError, KeyError):
length_width = (24, 80)
return length_width |
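Usage is a single call; per the code above it returns (lines, columns) and falls back to the LINES/COLUMNS environment variables, then (24, 80).
# Returns (lines, columns); falls back to LINES/COLUMNS env vars, then (24, 80).
lines, columns = TerminalSize()
print('terminal is %d rows x %d columns' % (lines, columns))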
def plot(self, stachans='all', size=(10, 7), show=True):
"""
Plot the output basis vectors for the detector at the given dimension.
Corresponds to the first n horizontal vectors of the V matrix.
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:type show: bool
:param show: Whether or not to show the figure.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. Note::
See :func:`eqcorrscan.utils.plotting.subspace_detector_plot`
for example.
"""
return subspace_detector_plot(detector=self, stachans=stachans,
size=size, show=show) | Plot the output basis vectors for the detector at the given dimension.
Corresponds to the first n horizontal vectors of the V matrix.
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:type show: bool
:param show: Whether or not to show the figure.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. Note::
See :func:`eqcorrscan.utils.plotting.subspace_detector_plot`
        for example. | Below is the instruction that describes the task:
### Input:
Plot the output basis vectors for the detector at the given dimension.
Corresponds to the first n horizontal vectors of the V matrix.
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:type show: bool
:param show: Whether or not to show the figure.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. Note::
See :func:`eqcorrscan.utils.plotting.subspace_detector_plot`
for example.
### Response:
def plot(self, stachans='all', size=(10, 7), show=True):
"""
Plot the output basis vectors for the detector at the given dimension.
Corresponds to the first n horizontal vectors of the V matrix.
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:type show: bool
:param show: Whether or not to show the figure.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. Note::
See :func:`eqcorrscan.utils.plotting.subspace_detector_plot`
for example.
"""
return subspace_detector_plot(detector=self, stachans=stachans,
size=size, show=show) |
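A usage sketch assuming 'detector' is an already-constructed EQcorrscan subspace Detector; the output file name is an assumption.
# Hypothetical: 'detector' is an existing eqcorrscan subspace Detector instance.
fig = detector.plot(stachans='all', size=(10, 7), show=False)
fig.savefig('detector_basis_vectors.png')   # file name is an assumption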
def _do_shell(self, line):
"""Send a command to the Unix shell.\n==> Usage: shell ls ~"""
if not line:
return
sp = Popen(line,
shell=True,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
close_fds=not WINDOWS)
(fo, fe) = (sp.stdout, sp.stderr)
if PY2:
out = fo.read().strip(EOL)
err = fe.read().strip(EOL)
else:
out = fo.read().decode("utf-8")
err = fe.read().decode("utf-8")
if out:
print(out)
return
if err:
            print(err.replace('isbn_', '')) | Send a command to the Unix shell.\n==> Usage: shell ls ~ | Below is the instruction that describes the task:
### Input:
Send a command to the Unix shell.\n==> Usage: shell ls ~
### Response:
def _do_shell(self, line):
"""Send a command to the Unix shell.\n==> Usage: shell ls ~"""
if not line:
return
sp = Popen(line,
shell=True,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
close_fds=not WINDOWS)
(fo, fe) = (sp.stdout, sp.stderr)
if PY2:
out = fo.read().strip(EOL)
err = fe.read().strip(EOL)
else:
out = fo.read().decode("utf-8")
err = fe.read().decode("utf-8")
if out:
print(out)
return
if err:
print(err.replace('isbn_', '')) |
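Inside the cmd-style shell this method is bound to, it is reached by typing `shell <command>`; a direct-call sketch with a hypothetical REPL instance would be:
# Hypothetical: 'repl' is an instance of the cmd-style shell defining _do_shell.
repl._do_shell('ls ~')   # prints stdout, or stderr with 'isbn_' stripped
repl._do_shell('')       # an empty line is a no-op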
def clean_form_template(self):
""" Check if template exists """
form_template = self.cleaned_data.get('form_template', '')
if form_template:
try:
get_template(form_template)
except TemplateDoesNotExist:
msg = _('Selected Form Template does not exist.')
raise forms.ValidationError(msg)
        return form_template | Check if template exists | Below is the instruction that describes the task:
### Input:
Check if template exists
### Response:
def clean_form_template(self):
""" Check if template exists """
form_template = self.cleaned_data.get('form_template', '')
if form_template:
try:
get_template(form_template)
except TemplateDoesNotExist:
msg = _('Selected Form Template does not exist.')
raise forms.ValidationError(msg)
return form_template |
def perform_create(self, serializer):
"""Create a resource."""
with transaction.atomic():
instance = serializer.save()
# Assign all permissions to the object contributor.
            assign_contributor_permissions(instance) | Create a resource. | Below is the instruction that describes the task:
### Input:
Create a resource.
### Response:
def perform_create(self, serializer):
"""Create a resource."""
with transaction.atomic():
instance = serializer.save()
# Assign all permissions to the object contributor.
assign_contributor_permissions(instance) |
def iscsi_settings(self):
"""Property to provide reference to iSCSI settings instance
        It is calculated once, the first time it is queried. On refresh,
this property gets reset.
"""
return ISCSISettings(
self._conn, utils.get_subresource_path_by(
self, ["@Redfish.Settings", "SettingsObject"]),
redfish_version=self.redfish_version) | Property to provide reference to iSCSI settings instance
        It is calculated once, the first time it is queried. On refresh,
        this property gets reset. | Below is the instruction that describes the task:
### Input:
Property to provide reference to iSCSI settings instance
        It is calculated once, the first time it is queried. On refresh,
this property gets reset.
### Response:
def iscsi_settings(self):
"""Property to provide reference to iSCSI settings instance
        It is calculated once, the first time it is queried. On refresh,
this property gets reset.
"""
return ISCSISettings(
self._conn, utils.get_subresource_path_by(
self, ["@Redfish.Settings", "SettingsObject"]),
redfish_version=self.redfish_version) |
def getEmailTemplate(request):
'''
This function handles the Ajax call made when a user wants a specific email template
'''
if request.method != 'POST':
return HttpResponse(_('Error, no POST data.'))
if not hasattr(request,'user'):
return HttpResponse(_('Error, not authenticated.'))
template_id = request.POST.get('template')
if not template_id:
return HttpResponse(_("Error, no template ID provided."))
try:
this_template = EmailTemplate.objects.get(id=template_id)
except ObjectDoesNotExist:
return HttpResponse(_("Error getting template."))
if this_template.groupRequired and this_template.groupRequired not in request.user.groups.all():
return HttpResponse(_("Error, no permission to access this template."))
if this_template.hideFromForm:
return HttpResponse(_("Error, no permission to access this template."))
return JsonResponse({
'subject': this_template.subject,
'content': this_template.content,
'html_content': this_template.html_content,
'richTextChoice': this_template.richTextChoice,
    }) | This function handles the Ajax call made when a user wants a specific email template | Below is the instruction that describes the task:
### Input:
This function handles the Ajax call made when a user wants a specific email template
### Response:
def getEmailTemplate(request):
'''
This function handles the Ajax call made when a user wants a specific email template
'''
if request.method != 'POST':
return HttpResponse(_('Error, no POST data.'))
if not hasattr(request,'user'):
return HttpResponse(_('Error, not authenticated.'))
template_id = request.POST.get('template')
if not template_id:
return HttpResponse(_("Error, no template ID provided."))
try:
this_template = EmailTemplate.objects.get(id=template_id)
except ObjectDoesNotExist:
return HttpResponse(_("Error getting template."))
if this_template.groupRequired and this_template.groupRequired not in request.user.groups.all():
return HttpResponse(_("Error, no permission to access this template."))
if this_template.hideFromForm:
return HttpResponse(_("Error, no permission to access this template."))
return JsonResponse({
'subject': this_template.subject,
'content': this_template.content,
'html_content': this_template.html_content,
'richTextChoice': this_template.richTextChoice,
}) |
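A hedged sketch of exercising the view above with Django's test client; the URL, credentials, and template id are all assumptions and not part of the original record.
# Hypothetical: URL, user credentials, and template id are assumptions.
from django.test import Client

c = Client()
c.login(username='staff', password='secret')
resp = c.post('/ajax/email-template/', {'template': 3})
print(resp.json())   # {'subject': ..., 'content': ..., 'html_content': ..., 'richTextChoice': ...}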
def get_creator(self, lang=None):
""" Get the DC Creator literal value
:param lang: Language to retrieve
:return: Creator string representation
:rtype: Literal
"""
return self.metadata.get_single(key=DC.creator, lang=lang) | Get the DC Creator literal value
:param lang: Language to retrieve
:return: Creator string representation
    :rtype: Literal | Below is the instruction that describes the task:
### Input:
Get the DC Creator literal value
:param lang: Language to retrieve
:return: Creator string representation
:rtype: Literal
### Response:
def get_creator(self, lang=None):
""" Get the DC Creator literal value
:param lang: Language to retrieve
:return: Creator string representation
:rtype: Literal
"""
return self.metadata.get_single(key=DC.creator, lang=lang) |