code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars)
---|---|---|
def begin_transaction(
self,
database,
options_=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Starts a new transaction.
Example:
>>> from google.cloud import firestore_v1beta1
>>>
>>> client = firestore_v1beta1.FirestoreClient()
>>>
>>> database = client.database_root_path('[PROJECT]', '[DATABASE]')
>>>
>>> response = client.begin_transaction(database)
Args:
database (str): The database name. In the format:
``projects/{project_id}/databases/{database_id}``.
options_ (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): The options for the transaction.
Defaults to a read-write transaction.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.firestore_v1beta1.types.BeginTransactionResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "begin_transaction" not in self._inner_api_calls:
self._inner_api_calls[
"begin_transaction"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.begin_transaction,
default_retry=self._method_configs["BeginTransaction"].retry,
default_timeout=self._method_configs["BeginTransaction"].timeout,
client_info=self._client_info,
)
request = firestore_pb2.BeginTransactionRequest(
database=database, options=options_
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("database", database)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["begin_transaction"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Starts a new transaction.
Example:
>>> from google.cloud import firestore_v1beta1
>>>
>>> client = firestore_v1beta1.FirestoreClient()
>>>
>>> database = client.database_root_path('[PROJECT]', '[DATABASE]')
>>>
>>> response = client.begin_transaction(database)
Args:
database (str): The database name. In the format:
``projects/{project_id}/databases/{database_id}``.
options_ (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): The options for the transaction.
Defaults to a read-write transaction.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.firestore_v1beta1.types.BeginTransactionResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | Below is the instruction that describes the task:
### Input:
Starts a new transaction.
Example:
>>> from google.cloud import firestore_v1beta1
>>>
>>> client = firestore_v1beta1.FirestoreClient()
>>>
>>> database = client.database_root_path('[PROJECT]', '[DATABASE]')
>>>
>>> response = client.begin_transaction(database)
Args:
database (str): The database name. In the format:
``projects/{project_id}/databases/{database_id}``.
options_ (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): The options for the transaction.
Defaults to a read-write transaction.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.firestore_v1beta1.types.BeginTransactionResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def begin_transaction(
self,
database,
options_=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Starts a new transaction.
Example:
>>> from google.cloud import firestore_v1beta1
>>>
>>> client = firestore_v1beta1.FirestoreClient()
>>>
>>> database = client.database_root_path('[PROJECT]', '[DATABASE]')
>>>
>>> response = client.begin_transaction(database)
Args:
database (str): The database name. In the format:
``projects/{project_id}/databases/{database_id}``.
options_ (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): The options for the transaction.
Defaults to a read-write transaction.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.firestore_v1beta1.types.BeginTransactionResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "begin_transaction" not in self._inner_api_calls:
self._inner_api_calls[
"begin_transaction"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.begin_transaction,
default_retry=self._method_configs["BeginTransaction"].retry,
default_timeout=self._method_configs["BeginTransaction"].timeout,
client_info=self._client_info,
)
request = firestore_pb2.BeginTransactionRequest(
database=database, options=options_
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("database", database)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["begin_transaction"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
def cursor(self, pos):
"""Returns a line at the nearest row split between tests.
Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.cursor>`
"""
row = self.indexAt(pos).row()
if row == -1:
row = self.model().rowCount()
row_height = self.rowHeight(0)
y = row_height*row
x = self.width()
return QtCore.QLine(0,y,x,y) | Returns a line at the nearest row split between tests.
Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.cursor>` | Below is the instruction that describes the task:
### Input:
Returns a line at the nearest row split between tests.
Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.cursor>`
### Response:
def cursor(self, pos):
"""Returns a line at the nearest row split between tests.
Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.cursor>`
"""
row = self.indexAt(pos).row()
if row == -1:
row = self.model().rowCount()
row_height = self.rowHeight(0)
y = row_height*row
x = self.width()
return QtCore.QLine(0,y,x,y) |
def split(table, field, pattern, newfields=None, include_original=False,
maxsplit=0, flags=0):
"""
Add one or more new fields with values generated by splitting an
existing value around occurrences of a regular expression. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'variable', 'value'],
... ['1', 'parad1', '12'],
... ['2', 'parad2', '15'],
... ['3', 'tempd1', '18'],
... ['4', 'tempd2', '19']]
>>> table2 = etl.split(table1, 'variable', 'd', ['variable', 'day'])
>>> table2
+-----+-------+----------+-----+
| id | value | variable | day |
+=====+=======+==========+=====+
| '1' | '12' | 'para' | '1' |
+-----+-------+----------+-----+
| '2' | '15' | 'para' | '2' |
+-----+-------+----------+-----+
| '3' | '18' | 'temp' | '1' |
+-----+-------+----------+-----+
| '4' | '19' | 'temp' | '2' |
+-----+-------+----------+-----+
By default the field on which the split is performed is omitted. It can
be included using the `include_original` argument.
"""
return SplitView(table, field, pattern, newfields, include_original,
maxsplit, flags) | Add one or more new fields with values generated by splitting an
existing value around occurrences of a regular expression. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'variable', 'value'],
... ['1', 'parad1', '12'],
... ['2', 'parad2', '15'],
... ['3', 'tempd1', '18'],
... ['4', 'tempd2', '19']]
>>> table2 = etl.split(table1, 'variable', 'd', ['variable', 'day'])
>>> table2
+-----+-------+----------+-----+
| id | value | variable | day |
+=====+=======+==========+=====+
| '1' | '12' | 'para' | '1' |
+-----+-------+----------+-----+
| '2' | '15' | 'para' | '2' |
+-----+-------+----------+-----+
| '3' | '18' | 'temp' | '1' |
+-----+-------+----------+-----+
| '4' | '19' | 'temp' | '2' |
+-----+-------+----------+-----+
By default the field on which the split is performed is omitted. It can
be included using the `include_original` argument. | Below is the instruction that describes the task:
### Input:
Add one or more new fields with values generated by splitting an
existing value around occurrences of a regular expression. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'variable', 'value'],
... ['1', 'parad1', '12'],
... ['2', 'parad2', '15'],
... ['3', 'tempd1', '18'],
... ['4', 'tempd2', '19']]
>>> table2 = etl.split(table1, 'variable', 'd', ['variable', 'day'])
>>> table2
+-----+-------+----------+-----+
| id | value | variable | day |
+=====+=======+==========+=====+
| '1' | '12' | 'para' | '1' |
+-----+-------+----------+-----+
| '2' | '15' | 'para' | '2' |
+-----+-------+----------+-----+
| '3' | '18' | 'temp' | '1' |
+-----+-------+----------+-----+
| '4' | '19' | 'temp' | '2' |
+-----+-------+----------+-----+
By default the field on which the split is performed is omitted. It can
be included using the `include_original` argument.
### Response:
def split(table, field, pattern, newfields=None, include_original=False,
maxsplit=0, flags=0):
"""
Add one or more new fields with values generated by splitting an
existing value around occurrences of a regular expression. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'variable', 'value'],
... ['1', 'parad1', '12'],
... ['2', 'parad2', '15'],
... ['3', 'tempd1', '18'],
... ['4', 'tempd2', '19']]
>>> table2 = etl.split(table1, 'variable', 'd', ['variable', 'day'])
>>> table2
+-----+-------+----------+-----+
| id | value | variable | day |
+=====+=======+==========+=====+
| '1' | '12' | 'para' | '1' |
+-----+-------+----------+-----+
| '2' | '15' | 'para' | '2' |
+-----+-------+----------+-----+
| '3' | '18' | 'temp' | '1' |
+-----+-------+----------+-----+
| '4' | '19' | 'temp' | '2' |
+-----+-------+----------+-----+
By default the field on which the split is performed is omitted. It can
be included using the `include_original` argument.
"""
return SplitView(table, field, pattern, newfields, include_original,
maxsplit, flags) |
def loadInputs(self, fname):
"""Load previsouly saved input values, and load them to GUI widgets
:param fname: file path where stashed input values are stored
:type fname: str
"""
inputsfname = os.path.join(systools.get_appdir(), fname)
try:
with open(inputsfname, 'r') as jf:
inputsdict = json.load(jf)
except:
logger = logging.getLogger('main')
logger.warning("Unable to load app data from file: {}".format(inputsfname))
inputsdict = {}
# self.display.spiketracePlot.setThreshold(inputsdict.get('threshold', 0.5))
self._thesholds = inputsdict.get('threshold', {})
self.stashedAisr = inputsdict.get('aifs', 100000)
self.ui.aifsSpnbx.setValue(self.stashedAisr)
self.ui.windowszSpnbx.setValue(inputsdict.get('windowsz', 0.1))
self.ui.binszSpnbx.setValue(inputsdict.get('binsz', 0.005))
self.saveformat = inputsdict.get('saveformat', 'hdf5')
self.ui.exploreStimEditor.setReps((inputsdict.get('ex_nreps', 5)))
self.ui.reprateSpnbx.setValue(inputsdict.get('reprate', 1))
# self.display.spiketracePlot.setRasterBounds(inputsdict.get('raster_bounds', (0.5,1)))
self.specArgs = inputsdict.get('specargs',{u'nfft':512, u'window':u'hanning', u'overlap':90, 'colormap':{'lut':None, 'state':None, 'levels':None}})
# self.display.setSpecArgs(**self.specArgs)
SpecWidget.setSpecArgs(**self.specArgs)
self.viewSettings = inputsdict.get('viewSettings', {'fontsz': 10, 'display_attributes':{}})
self.ui.stimDetails.setDisplayAttributes(self.viewSettings['display_attributes'])
font = QtGui.QFont()
font.setPointSize(self.viewSettings['fontsz'])
QtGui.QApplication.setFont(font)
self.ui.calibrationWidget.ui.nrepsSpnbx.setValue(inputsdict.get('calreps', 5))
self.calvals = inputsdict.get('calvals', {'calf':20000, 'caldb':100,
'calv':0.1, 'use_calfile':False,
'frange':(5000, 1e5), 'calname': ''})
self.calvals['use_calfile'] = False
self.calvals['calname'] = ''
self.ui.refDbSpnbx.setValue(self.calvals['caldb'])
self.ui.mphoneSensSpnbx.setValue(inputsdict.get('mphonesens', 0.004))
self.ui.mphoneDBSpnbx.setValue(MPHONE_CALDB)
# self.ui.mphoneDBSpnbx.setValue(inputsdict.get('mphonedb', 94))
Vocalization.paths = inputsdict.get('vocalpaths', [])
# load the previous sessions scaling
self.tscale = inputsdict.get('tscale', SmartSpinBox.MilliSeconds)
self.fscale = inputsdict.get('fscale', SmartSpinBox.kHz)
try:
self.updateUnitLabels(self.tscale, self.fscale)
except:
self.tscale = 'ms'
self.fscale = 'kHz'
self.updateUnitLabels(self.tscale, self.fscale)
cal_template = inputsdict.get('calparams', None)
if cal_template is not None:
try:
self.acqmodel.load_calibration_template(cal_template)
except:
logger = logging.getLogger('main')
logger.exception("Unable to load previous calibration settings")
else:
logger = logging.getLogger('main')
logger.debug('No saved calibration stimului inputs')
if 'explorestims' in inputsdict:
self.ui.exploreStimEditor.loadTemplate(inputsdict['explorestims'])
else:
logger = logging.getLogger('main')
logger.debug('No saved explore stimului inputs')
# load the previous session's Tuning Curve defaults
TCFactory.defaultInputs.update(inputsdict.get('tuning_curve', TCFactory.defaultInputs))
# set defaults then merge
self.advanced_options = {'device_name':'',
'max_voltage':1.5,
'device_max_voltage': 10.0,
'volt_amp_conversion': 0.1,
'use_attenuator': False }
if 'advanced_options' in inputsdict:
self.advanced_options.update(inputsdict['advanced_options'])
StimulusModel.setMaxVoltage(self.advanced_options['max_voltage'], self.advanced_options['device_max_voltage'])
self.display.setAmpConversionFactor(self.advanced_options['volt_amp_conversion'])
if self.advanced_options['use_attenuator']:
self.acqmodel.attenuator_connection(True)
else:
self.acqmodel.attenuator_connection(False)
self._aichans = inputsdict.get('aichans', [])
self._aichan_details = inputsdict.get('aichan_details', {})
for name, deets in self._aichan_details.items():
# make sure all field as present in details for each channel
self._aichan_details[name]['threshold'] = deets.get('threshold', 5)
self._aichan_details[name]['polarity'] = deets.get('polarity', 1)
self._aichan_details[name]['raster_bounds'] = deets.get('raster_bounds', (0.5,0.9))
self._aichan_details[name]['abs'] = deets.get('abs', True)
self.reset_device_channels()
stim_defaults = inputsdict.get('stim_view_defaults', {})
for name, state in stim_defaults.items():
StimulusView.updateDefaults(name, state) | Load previsouly saved input values, and load them to GUI widgets
:param fname: file path where stashed input values are stored
:type fname: str | Below is the instruction that describes the task:
### Input:
Load previsouly saved input values, and load them to GUI widgets
:param fname: file path where stashed input values are stored
:type fname: str
### Response:
def loadInputs(self, fname):
"""Load previsouly saved input values, and load them to GUI widgets
:param fname: file path where stashed input values are stored
:type fname: str
"""
inputsfname = os.path.join(systools.get_appdir(), fname)
try:
with open(inputsfname, 'r') as jf:
inputsdict = json.load(jf)
except:
logger = logging.getLogger('main')
logger.warning("Unable to load app data from file: {}".format(inputsfname))
inputsdict = {}
# self.display.spiketracePlot.setThreshold(inputsdict.get('threshold', 0.5))
self._thesholds = inputsdict.get('threshold', {})
self.stashedAisr = inputsdict.get('aifs', 100000)
self.ui.aifsSpnbx.setValue(self.stashedAisr)
self.ui.windowszSpnbx.setValue(inputsdict.get('windowsz', 0.1))
self.ui.binszSpnbx.setValue(inputsdict.get('binsz', 0.005))
self.saveformat = inputsdict.get('saveformat', 'hdf5')
self.ui.exploreStimEditor.setReps((inputsdict.get('ex_nreps', 5)))
self.ui.reprateSpnbx.setValue(inputsdict.get('reprate', 1))
# self.display.spiketracePlot.setRasterBounds(inputsdict.get('raster_bounds', (0.5,1)))
self.specArgs = inputsdict.get('specargs',{u'nfft':512, u'window':u'hanning', u'overlap':90, 'colormap':{'lut':None, 'state':None, 'levels':None}})
# self.display.setSpecArgs(**self.specArgs)
SpecWidget.setSpecArgs(**self.specArgs)
self.viewSettings = inputsdict.get('viewSettings', {'fontsz': 10, 'display_attributes':{}})
self.ui.stimDetails.setDisplayAttributes(self.viewSettings['display_attributes'])
font = QtGui.QFont()
font.setPointSize(self.viewSettings['fontsz'])
QtGui.QApplication.setFont(font)
self.ui.calibrationWidget.ui.nrepsSpnbx.setValue(inputsdict.get('calreps', 5))
self.calvals = inputsdict.get('calvals', {'calf':20000, 'caldb':100,
'calv':0.1, 'use_calfile':False,
'frange':(5000, 1e5), 'calname': ''})
self.calvals['use_calfile'] = False
self.calvals['calname'] = ''
self.ui.refDbSpnbx.setValue(self.calvals['caldb'])
self.ui.mphoneSensSpnbx.setValue(inputsdict.get('mphonesens', 0.004))
self.ui.mphoneDBSpnbx.setValue(MPHONE_CALDB)
# self.ui.mphoneDBSpnbx.setValue(inputsdict.get('mphonedb', 94))
Vocalization.paths = inputsdict.get('vocalpaths', [])
# load the previous sessions scaling
self.tscale = inputsdict.get('tscale', SmartSpinBox.MilliSeconds)
self.fscale = inputsdict.get('fscale', SmartSpinBox.kHz)
try:
self.updateUnitLabels(self.tscale, self.fscale)
except:
self.tscale = 'ms'
self.fscale = 'kHz'
self.updateUnitLabels(self.tscale, self.fscale)
cal_template = inputsdict.get('calparams', None)
if cal_template is not None:
try:
self.acqmodel.load_calibration_template(cal_template)
except:
logger = logging.getLogger('main')
logger.exception("Unable to load previous calibration settings")
else:
logger = logging.getLogger('main')
logger.debug('No saved calibration stimului inputs')
if 'explorestims' in inputsdict:
self.ui.exploreStimEditor.loadTemplate(inputsdict['explorestims'])
else:
logger = logging.getLogger('main')
logger.debug('No saved explore stimului inputs')
# load the previous session's Tuning Curve defaults
TCFactory.defaultInputs.update(inputsdict.get('tuning_curve', TCFactory.defaultInputs))
# set defaults then merge
self.advanced_options = {'device_name':'',
'max_voltage':1.5,
'device_max_voltage': 10.0,
'volt_amp_conversion': 0.1,
'use_attenuator': False }
if 'advanced_options' in inputsdict:
self.advanced_options.update(inputsdict['advanced_options'])
StimulusModel.setMaxVoltage(self.advanced_options['max_voltage'], self.advanced_options['device_max_voltage'])
self.display.setAmpConversionFactor(self.advanced_options['volt_amp_conversion'])
if self.advanced_options['use_attenuator']:
self.acqmodel.attenuator_connection(True)
else:
self.acqmodel.attenuator_connection(False)
self._aichans = inputsdict.get('aichans', [])
self._aichan_details = inputsdict.get('aichan_details', {})
for name, deets in self._aichan_details.items():
# make sure all field as present in details for each channel
self._aichan_details[name]['threshold'] = deets.get('threshold', 5)
self._aichan_details[name]['polarity'] = deets.get('polarity', 1)
self._aichan_details[name]['raster_bounds'] = deets.get('raster_bounds', (0.5,0.9))
self._aichan_details[name]['abs'] = deets.get('abs', True)
self.reset_device_channels()
stim_defaults = inputsdict.get('stim_view_defaults', {})
for name, state in stim_defaults.items():
StimulusView.updateDefaults(name, state) |
def index(self, value):
"""
Return index of *value* in self.
Raises ValueError if *value* is not found.
"""
# pylint: disable=arguments-differ
for idx, val in enumerate(self):
if value == val:
return idx
raise ValueError('{0!r} is not in dict'.format(value)) | Return index of *value* in self.
Raises ValueError if *value* is not found. | Below is the instruction that describes the task:
### Input:
Return index of *value* in self.
Raises ValueError if *value* is not found.
### Response:
def index(self, value):
"""
Return index of *value* in self.
Raises ValueError if *value* is not found.
"""
# pylint: disable=arguments-differ
for idx, val in enumerate(self):
if value == val:
return idx
raise ValueError('{0!r} is not in dict'.format(value)) |
def _format_date(self, obj) -> str:
"""
Short date format.
:param obj: date or datetime or None
:return: str
"""
if obj is None:
return ''
if isinstance(obj, datetime):
obj = obj.date()
return date_format(obj, 'SHORT_DATE_FORMAT') | Short date format.
:param obj: date or datetime or None
:return: str | Below is the instruction that describes the task:
### Input:
Short date format.
:param obj: date or datetime or None
:return: str
### Response:
def _format_date(self, obj) -> str:
"""
Short date format.
:param obj: date or datetime or None
:return: str
"""
if obj is None:
return ''
if isinstance(obj, datetime):
obj = obj.date()
return date_format(obj, 'SHORT_DATE_FORMAT') |
def get_hash(self):
"""Generate and return the dict index hash of the given queue item.
Note:
Cookies should not be included in the hash calculation because
otherwise requests are crawled multiple times with e.g. different
session keys, causing infinite crawling recursion.
Note:
At this moment the keys do not actually get hashed since it works perfectly without and
since hashing the keys requires us to built hash collision management.
Returns:
str: The hash of the given queue item.
"""
if self.__index_hash:
return self.__index_hash
key = self.request.method
key += URLHelper.get_protocol(self.request.url)
key += URLHelper.get_subdomain(self.request.url)
key += URLHelper.get_hostname(self.request.url)
key += URLHelper.get_tld(self.request.url)
key += URLHelper.get_path(self.request.url)
key += str(URLHelper.get_ordered_params(self.request.url))
if self.request.data is not None:
key += str(self.request.data.keys())
self.__index_hash = key
return self.__index_hash | Generate and return the dict index hash of the given queue item.
Note:
Cookies should not be included in the hash calculation because
otherwise requests are crawled multiple times with e.g. different
session keys, causing infinite crawling recursion.
Note:
At this moment the keys do not actually get hashed since it works perfectly without and
since hashing the keys requires us to built hash collision management.
Returns:
str: The hash of the given queue item. | Below is the instruction that describes the task:
### Input:
Generate and return the dict index hash of the given queue item.
Note:
Cookies should not be included in the hash calculation because
otherwise requests are crawled multiple times with e.g. different
session keys, causing infinite crawling recursion.
Note:
At this moment the keys do not actually get hashed since it works perfectly without and
since hashing the keys requires us to built hash collision management.
Returns:
str: The hash of the given queue item.
### Response:
def get_hash(self):
"""Generate and return the dict index hash of the given queue item.
Note:
Cookies should not be included in the hash calculation because
otherwise requests are crawled multiple times with e.g. different
session keys, causing infinite crawling recursion.
Note:
At this moment the keys do not actually get hashed since it works perfectly without and
since hashing the keys requires us to built hash collision management.
Returns:
str: The hash of the given queue item.
"""
if self.__index_hash:
return self.__index_hash
key = self.request.method
key += URLHelper.get_protocol(self.request.url)
key += URLHelper.get_subdomain(self.request.url)
key += URLHelper.get_hostname(self.request.url)
key += URLHelper.get_tld(self.request.url)
key += URLHelper.get_path(self.request.url)
key += str(URLHelper.get_ordered_params(self.request.url))
if self.request.data is not None:
key += str(self.request.data.keys())
self.__index_hash = key
return self.__index_hash |
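The ordering of the query parameters is what makes this hash stable across equivalent URLs. A simplified stand-in for the URLHelper calls above (urllib-based and purely illustrative, not the crawler's actual helper) shows the effect:

from urllib.parse import urlparse, parse_qsl

def request_key(method, url):
    # Same idea as get_hash: normalise the URL parts and sort the query params,
    # so a different parameter order does not create a second queue entry.
    parts = urlparse(url)
    params = sorted(parse_qsl(parts.query))
    return method + parts.scheme + parts.netloc + parts.path + str(params)

print(request_key("GET", "https://example.com/a?x=1&y=2") ==
      request_key("GET", "https://example.com/a?y=2&x=1"))  # True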
def worker_unreject(self, chosen_hit, assignment_ids = None):
''' Unreject worker '''
if chosen_hit:
workers = self.amt_services.get_workers("Rejected")
assignment_ids = [worker['assignmentId'] for worker in workers if \
worker['hitId'] == chosen_hit]
for assignment_id in assignment_ids:
success = self.amt_services.unreject_worker(assignment_id)
if success:
print 'unrejected %s' % (assignment_id)
else:
print '*** failed to unreject', assignment_id | Unreject worker | Below is the instruction that describes the task:
### Input:
Unreject worker
### Response:
def worker_unreject(self, chosen_hit, assignment_ids = None):
''' Unreject worker '''
if chosen_hit:
workers = self.amt_services.get_workers("Rejected")
assignment_ids = [worker['assignmentId'] for worker in workers if \
worker['hitId'] == chosen_hit]
for assignment_id in assignment_ids:
success = self.amt_services.unreject_worker(assignment_id)
if success:
print 'unrejected %s' % (assignment_id)
else:
print '*** failed to unreject', assignment_id |
def map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(array_1d, shape, one_to_two):
"""For a 1D array that was computed by mapping unmasked values from a 2D array of shape (rows, columns), map its \
values back to the original 2D array where masked values are set to zero.
This uses a 1D array 'one_to_two' where each index gives the 2D pixel indexes of the 1D array's unmasked pixels, \
for example:
- If one_to_two[0] = [0,0], the first value of the 1D array maps to the pixel [0,0] of the 2D array.
- If one_to_two[1] = [0,1], the second value of the 1D array maps to the pixel [0,1] of the 2D array.
- If one_to_two[4] = [1,1], the fifth value of the 1D array maps to the pixel [1,1] of the 2D array.
Parameters
----------
array_1d : ndarray
The 1D array of values which are mapped to a 2D array.
shape : (int, int)
The shape of the 2D array which the pixels are defined on.
one_to_two : ndarray
An array describing the 2D array index that every 1D array index maps too.
Returns
--------
ndarray
A 2D array of values mapped from the 1D array with dimensions shape.
Examples
--------
one_to_two = np.array([[0,1], [1,0], [1,1], [1,2], [2,1]])
array_1d = np.array([[2.0, 4.0, 5.0, 6.0, 8.0])
array_2d = map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(array_1d=array_1d, shape=(3,3),
one_to_two=one_to_two)
"""
array_2d = np.zeros(shape)
for index in range(len(one_to_two)):
array_2d[one_to_two[index, 0], one_to_two[index, 1]] = array_1d[index]
return array_2d | For a 1D array that was computed by mapping unmasked values from a 2D array of shape (rows, columns), map its \
values back to the original 2D array where masked values are set to zero.
This uses a 1D array 'one_to_two' where each index gives the 2D pixel indexes of the 1D array's unmasked pixels, \
for example:
- If one_to_two[0] = [0,0], the first value of the 1D array maps to the pixel [0,0] of the 2D array.
- If one_to_two[1] = [0,1], the second value of the 1D array maps to the pixel [0,1] of the 2D array.
- If one_to_two[4] = [1,1], the fifth value of the 1D array maps to the pixel [1,1] of the 2D array.
Parameters
----------
array_1d : ndarray
The 1D array of values which are mapped to a 2D array.
shape : (int, int)
The shape of the 2D array which the pixels are defined on.
one_to_two : ndarray
An array describing the 2D array index that every 1D array index maps too.
Returns
--------
ndarray
A 2D array of values mapped from the 1D array with dimensions shape.
Examples
--------
one_to_two = np.array([[0,1], [1,0], [1,1], [1,2], [2,1]])
array_1d = np.array([[2.0, 4.0, 5.0, 6.0, 8.0])
array_2d = map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(array_1d=array_1d, shape=(3,3),
one_to_two=one_to_two) | Below is the instruction that describes the task:
### Input:
For a 1D array that was computed by mapping unmasked values from a 2D array of shape (rows, columns), map its \
values back to the original 2D array where masked values are set to zero.
This uses a 1D array 'one_to_two' where each index gives the 2D pixel indexes of the 1D array's unmasked pixels, \
for example:
- If one_to_two[0] = [0,0], the first value of the 1D array maps to the pixel [0,0] of the 2D array.
- If one_to_two[1] = [0,1], the second value of the 1D array maps to the pixel [0,1] of the 2D array.
- If one_to_two[4] = [1,1], the fifth value of the 1D array maps to the pixel [1,1] of the 2D array.
Parameters
----------
array_1d : ndarray
The 1D array of values which are mapped to a 2D array.
shape : (int, int)
The shape of the 2D array which the pixels are defined on.
one_to_two : ndarray
An array describing the 2D array index that every 1D array index maps too.
Returns
--------
ndarray
A 2D array of values mapped from the 1D array with dimensions shape.
Examples
--------
one_to_two = np.array([[0,1], [1,0], [1,1], [1,2], [2,1]])
array_1d = np.array([[2.0, 4.0, 5.0, 6.0, 8.0])
array_2d = map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(array_1d=array_1d, shape=(3,3),
one_to_two=one_to_two)
### Response:
def map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(array_1d, shape, one_to_two):
"""For a 1D array that was computed by mapping unmasked values from a 2D array of shape (rows, columns), map its \
values back to the original 2D array where masked values are set to zero.
This uses a 1D array 'one_to_two' where each index gives the 2D pixel indexes of the 1D array's unmasked pixels, \
for example:
- If one_to_two[0] = [0,0], the first value of the 1D array maps to the pixel [0,0] of the 2D array.
- If one_to_two[1] = [0,1], the second value of the 1D array maps to the pixel [0,1] of the 2D array.
- If one_to_two[4] = [1,1], the fifth value of the 1D array maps to the pixel [1,1] of the 2D array.
Parameters
----------
array_1d : ndarray
The 1D array of values which are mapped to a 2D array.
shape : (int, int)
The shape of the 2D array which the pixels are defined on.
one_to_two : ndarray
An array describing the 2D array index that every 1D array index maps too.
Returns
--------
ndarray
A 2D array of values mapped from the 1D array with dimensions shape.
Examples
--------
one_to_two = np.array([[0,1], [1,0], [1,1], [1,2], [2,1]])
array_1d = np.array([[2.0, 4.0, 5.0, 6.0, 8.0])
array_2d = map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(array_1d=array_1d, shape=(3,3),
one_to_two=one_to_two)
"""
array_2d = np.zeros(shape)
for index in range(len(one_to_two)):
array_2d[one_to_two[index, 0], one_to_two[index, 1]] = array_1d[index]
return array_2d |
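A quick, self-contained run of the mapping logic above, mirroring the docstring's example with NumPy imported as np:

import numpy as np

def map_1d_to_2d(array_1d, shape, one_to_two):
    # Place each unmasked 1D value at its (row, column) position; masked cells stay 0.
    array_2d = np.zeros(shape)
    for index in range(len(one_to_two)):
        array_2d[one_to_two[index, 0], one_to_two[index, 1]] = array_1d[index]
    return array_2d

one_to_two = np.array([[0, 1], [1, 0], [1, 1], [1, 2], [2, 1]])
array_1d = np.array([2.0, 4.0, 5.0, 6.0, 8.0])
print(map_1d_to_2d(array_1d, (3, 3), one_to_two))
# [[0. 2. 0.]
#  [4. 5. 6.]
#  [0. 8. 0.]]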
def check_token(token):
''' Verify http header token authentification '''
user = models.User.objects(api_key=token).first()
return user or None | Verify http header token authentification | Below is the instruction that describes the task:
### Input:
Verify http header token authentification
### Response:
def check_token(token):
''' Verify http header token authentification '''
user = models.User.objects(api_key=token).first()
return user or None |
def groupby(iterable, key=0, filter=None):
"""
wrapper to itertools.groupby that returns a list of each group, rather
than a generator and accepts integers or strings as the key and
automatically converts them to callables with itemgetter(key)
Arguments:
iterable: iterable
key: string, int or callable that tells how to group
Returns:
an iterable where each item is the key and a *list* of that
group. (itertools.groupby returns a generator of that group).
e.g. groupby(iterable, 0)
"""
if isinstance(key, (basestring, int)):
key = itemgetter(key)
elif isinstance(key, (tuple, list)):
key = itemgetter(*key)
for label, grp in igroupby(iterable, key):
yield label, list(grp) | wrapper to itertools.groupby that returns a list of each group, rather
than a generator and accepts integers or strings as the key and
automatically converts them to callables with itemgetter(key)
Arguments:
iterable: iterable
key: string, int or callable that tells how to group
Returns:
an iterable where each item is the key and a *list* of that
group. (itertools.groupby returns a generator of that group).
e.g. groupby(iterable, 0) | Below is the instruction that describes the task:
### Input:
wrapper to itertools.groupby that returns a list of each group, rather
than a generator and accepts integers or strings as the key and
automatically converts them to callables with itemgetter(key)
Arguments:
iterable: iterable
key: string, int or callable that tells how to group
Returns:
an iterable where each item is the key and a *list* of that
group. (itertools.groupby returns a generator of that group).
e.g. groupby(iterable, 0)
### Response:
def groupby(iterable, key=0, filter=None):
"""
wrapper to itertools.groupby that returns a list of each group, rather
than a generator and accepts integers or strings as the key and
automatically converts them to callables with itemgetter(key)
Arguments:
iterable: iterable
key: string, int or callable that tells how to group
Returns:
an iterable where each item is the key and a *list* of that
group. (itertools.groupby returns a generator of that group).
e.g. groupby(iterable, 0)
"""
if isinstance(key, (basestring, int)):
key = itemgetter(key)
elif isinstance(key, (tuple, list)):
key = itemgetter(*key)
for label, grp in igroupby(iterable, key):
yield label, list(grp) |
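Because the wrapper delegates to itertools.groupby, the input must already be sorted by the key; a minimal sketch of the same itemgetter-based grouping on a toy list:

from itertools import groupby as igroupby
from operator import itemgetter

rows = [("chr1", 10), ("chr1", 25), ("chr2", 5)]  # already sorted by column 0
for label, grp in igroupby(rows, itemgetter(0)):
    print(label, list(grp))
# chr1 [('chr1', 10), ('chr1', 25)]
# chr2 [('chr2', 5)]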
def on_disconnect(self=None) -> callable:
"""Use this decorator to automatically register a function for handling disconnections.
This does the same thing as :meth:`add_handler` using the :class:`DisconnectHandler`.
"""
def decorator(func: callable) -> Handler:
handler = pyrogram.DisconnectHandler(func)
if self is not None:
self.add_handler(handler)
return handler
return decorator | Use this decorator to automatically register a function for handling disconnections.
This does the same thing as :meth:`add_handler` using the :class:`DisconnectHandler`. | Below is the instruction that describes the task:
### Input:
Use this decorator to automatically register a function for handling disconnections.
This does the same thing as :meth:`add_handler` using the :class:`DisconnectHandler`.
### Response:
def on_disconnect(self=None) -> callable:
"""Use this decorator to automatically register a function for handling disconnections.
This does the same thing as :meth:`add_handler` using the :class:`DisconnectHandler`.
"""
def decorator(func: callable) -> Handler:
handler = pyrogram.DisconnectHandler(func)
if self is not None:
self.add_handler(handler)
return handler
return decorator |
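The self=None default is what lets the decorator work both on an instance and standalone. A framework-agnostic sketch of the same registration pattern (Dispatcher and Handler below are placeholders, not Pyrogram classes):

class Handler:
    def __init__(self, callback):
        self.callback = callback

class Dispatcher:
    def __init__(self):
        self.handlers = []

    def add_handler(self, handler):
        self.handlers.append(handler)

    def on_disconnect(self=None):
        # Registers immediately when called on an instance; when called without
        # one, it only wraps the function in a Handler for later registration.
        def decorator(func):
            handler = Handler(func)
            if self is not None:
                self.add_handler(handler)
            return handler
        return decorator

d = Dispatcher()

@d.on_disconnect()
def handle_disconnect(client):
    print("disconnected")

print(len(d.handlers))  # 1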
def com_google_fonts_check_metadata_valid_copyright(font_metadata):
"""Copyright notices match canonical pattern in METADATA.pb"""
import re
string = font_metadata.copyright
does_match = re.search(r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)',
string)
if does_match:
yield PASS, "METADATA.pb copyright string is good"
else:
yield FAIL, ("METADATA.pb: Copyright notices should match"
" a pattern similar to:"
" 'Copyright 2017 The Familyname"
" Project Authors (git url)'\n"
"But instead we have got:"
" '{}'").format(string) | Copyright notices match canonical pattern in METADATA.pb | Below is the instruction that describes the task:
### Input:
Copyright notices match canonical pattern in METADATA.pb
### Response:
def com_google_fonts_check_metadata_valid_copyright(font_metadata):
"""Copyright notices match canonical pattern in METADATA.pb"""
import re
string = font_metadata.copyright
does_match = re.search(r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)',
string)
if does_match:
yield PASS, "METADATA.pb copyright string is good"
else:
yield FAIL, ("METADATA.pb: Copyright notices should match"
" a pattern similar to:"
" 'Copyright 2017 The Familyname"
" Project Authors (git url)'\n"
"But instead we have got:"
" '{}'").format(string) |
def remote_run(cmd, instance_name, detach=False, retries=1):
"""Run command on GCS instance, optionally detached."""
if detach:
cmd = SCREEN.format(command=cmd)
args = SSH.format(instance_name=instance_name).split()
args.append(cmd)
for i in range(retries + 1):
try:
if i > 0:
tf.logging.info("Retry %d for %s", i, args)
return sp.check_call(args)
except sp.CalledProcessError as e:
if i == retries:
raise e | Run command on GCS instance, optionally detached. | Below is the instruction that describes the task:
### Input:
Run command on GCS instance, optionally detached.
### Response:
def remote_run(cmd, instance_name, detach=False, retries=1):
"""Run command on GCS instance, optionally detached."""
if detach:
cmd = SCREEN.format(command=cmd)
args = SSH.format(instance_name=instance_name).split()
args.append(cmd)
for i in range(retries + 1):
try:
if i > 0:
tf.logging.info("Retry %d for %s", i, args)
return sp.check_call(args)
except sp.CalledProcessError as e:
if i == retries:
raise e |
def popitem(self):
"""Remove and return the `(key, value)` pair least recently used that
has not already expired.
"""
with self.__timer as time:
self.expire(time)
try:
key = next(iter(self.__links))
except StopIteration:
raise KeyError('%s is empty' % self.__class__.__name__)
else:
return (key, self.pop(key)) | Remove and return the `(key, value)` pair least recently used that
has not already expired. | Below is the instruction that describes the task:
### Input:
Remove and return the `(key, value)` pair least recently used that
has not already expired.
### Response:
def popitem(self):
"""Remove and return the `(key, value)` pair least recently used that
has not already expired.
"""
with self.__timer as time:
self.expire(time)
try:
key = next(iter(self.__links))
except StopIteration:
raise KeyError('%s is empty' % self.__class__.__name__)
else:
return (key, self.pop(key)) |
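The surrounding class is not shown here; if this is the popitem of cachetools' TTLCache (whose documented behaviour matches this docstring), usage would look roughly like:

from cachetools import TTLCache  # assumption: this method belongs to a TTLCache-like class

cache = TTLCache(maxsize=3, ttl=300)
cache["a"] = 1
cache["b"] = 2
print(cache.popitem())  # ('a', 1): the oldest unexpired entry is removed first
print(dict(cache))      # {'b': 2}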
def check_resume(args: argparse.Namespace, output_folder: str) -> bool:
"""
Check if we should resume a broken training run.
:param args: Arguments as returned by argparse.
:param output_folder: Main output folder for the model.
:return: Flag signaling if we are resuming training and the directory with
the training status.
"""
resume_training = False
training_state_dir = os.path.join(output_folder, C.TRAINING_STATE_DIRNAME)
if os.path.exists(output_folder):
if args.overwrite_output:
logger.info("Removing existing output folder %s.", output_folder)
shutil.rmtree(output_folder)
os.makedirs(output_folder)
elif os.path.exists(training_state_dir):
old_args = vars(arguments.load_args(os.path.join(output_folder, C.ARGS_STATE_NAME)))
arg_diffs = _dict_difference(vars(args), old_args) | _dict_difference(old_args, vars(args))
# Remove args that may differ without affecting the training.
arg_diffs -= set(C.ARGS_MAY_DIFFER)
# allow different device-ids provided their total count is the same
if 'device_ids' in arg_diffs and len(old_args['device_ids']) == len(vars(args)['device_ids']):
arg_diffs.discard('device_ids')
if not arg_diffs:
resume_training = True
else:
# We do not have the logger yet
logger.error("Mismatch in arguments for training continuation.")
logger.error("Differing arguments: %s.", ", ".join(arg_diffs))
sys.exit(1)
elif os.path.exists(os.path.join(output_folder, C.PARAMS_BEST_NAME)):
logger.error("Refusing to overwrite model folder %s as it seems to contain a trained model.", output_folder)
sys.exit(1)
else:
logger.info("The output folder %s already exists, but no training state or parameter file was found. "
"Will start training from scratch.", output_folder)
else:
os.makedirs(output_folder)
return resume_training | Check if we should resume a broken training run.
:param args: Arguments as returned by argparse.
:param output_folder: Main output folder for the model.
:return: Flag signaling if we are resuming training and the directory with
the training status. | Below is the instruction that describes the task:
### Input:
Check if we should resume a broken training run.
:param args: Arguments as returned by argparse.
:param output_folder: Main output folder for the model.
:return: Flag signaling if we are resuming training and the directory with
the training status.
### Response:
def check_resume(args: argparse.Namespace, output_folder: str) -> bool:
"""
Check if we should resume a broken training run.
:param args: Arguments as returned by argparse.
:param output_folder: Main output folder for the model.
:return: Flag signaling if we are resuming training and the directory with
the training status.
"""
resume_training = False
training_state_dir = os.path.join(output_folder, C.TRAINING_STATE_DIRNAME)
if os.path.exists(output_folder):
if args.overwrite_output:
logger.info("Removing existing output folder %s.", output_folder)
shutil.rmtree(output_folder)
os.makedirs(output_folder)
elif os.path.exists(training_state_dir):
old_args = vars(arguments.load_args(os.path.join(output_folder, C.ARGS_STATE_NAME)))
arg_diffs = _dict_difference(vars(args), old_args) | _dict_difference(old_args, vars(args))
# Remove args that may differ without affecting the training.
arg_diffs -= set(C.ARGS_MAY_DIFFER)
# allow different device-ids provided their total count is the same
if 'device_ids' in arg_diffs and len(old_args['device_ids']) == len(vars(args)['device_ids']):
arg_diffs.discard('device_ids')
if not arg_diffs:
resume_training = True
else:
# We do not have the logger yet
logger.error("Mismatch in arguments for training continuation.")
logger.error("Differing arguments: %s.", ", ".join(arg_diffs))
sys.exit(1)
elif os.path.exists(os.path.join(output_folder, C.PARAMS_BEST_NAME)):
logger.error("Refusing to overwrite model folder %s as it seems to contain a trained model.", output_folder)
sys.exit(1)
else:
logger.info("The output folder %s already exists, but no training state or parameter file was found. "
"Will start training from scratch.", output_folder)
else:
os.makedirs(output_folder)
return resume_training |
def format_map(self, format_string, mapping):
"""format a string by a map
Args:
format_string(str): A format string
mapping(dict): A map to format the string
Returns:
A formatted string.
Raises:
KeyError: if key is not provided by the given map.
"""
return self.vformat(format_string, args=None, kwargs=mapping) | format a string by a map
Args:
format_string(str): A format string
mapping(dict): A map to format the string
Returns:
A formatted string.
Raises:
KeyError: if key is not provided by the given map. | Below is the instruction that describes the task:
### Input:
format a string by a map
Args:
format_string(str): A format string
mapping(dict): A map to format the string
Returns:
A formatted string.
Raises:
KeyError: if key is not provided by the given map.
### Response:
def format_map(self, format_string, mapping):
"""format a string by a map
Args:
format_string(str): A format string
mapping(dict): A map to format the string
Returns:
A formatted string.
Raises:
KeyError: if key is not provided by the given map.
"""
return self.vformat(format_string, args=None, kwargs=mapping) |
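The class this method belongs to is not shown; assuming it extends string.Formatter (which provides vformat), a minimal illustrative subclass behaves like this:

import string

class MapFormatter(string.Formatter):
    def format_map(self, format_string, mapping):
        # No positional args are passed, so only named fields are resolved,
        # and a missing key raises KeyError as documented.
        return self.vformat(format_string, args=None, kwargs=mapping)

f = MapFormatter()
print(f.format_map("{host}:{port}", {"host": "localhost", "port": 8080}))  # localhost:8080
try:
    f.format_map("{missing}", {})
except KeyError as exc:
    print("KeyError:", exc)  # KeyError: 'missing'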
def slices(self):
"""Returns a generator yielding tuple of slice objects.
Order is not guaranteed.
"""
if self.chunks is None:
yield tuple(slice(None, s) for s in self.shape)
else:
ceilings = tuple(-(-s // c) for s, c in zip(self.shape, self.chunks))
for idx in np.ndindex(ceilings): # could also use itertools.product
out = []
for i, c, s in zip(idx, self.chunks, self.shape):
start = i * c
stop = min(start + c, s + 1)
out.append(slice(start, stop, 1))
yield tuple(out) | Returns a generator yielding tuple of slice objects.
Order is not guaranteed. | Below is the instruction that describes the task:
### Input:
Returns a generator yielding tuple of slice objects.
Order is not guaranteed.
### Response:
def slices(self):
"""Returns a generator yielding tuple of slice objects.
Order is not guaranteed.
"""
if self.chunks is None:
yield tuple(slice(None, s) for s in self.shape)
else:
ceilings = tuple(-(-s // c) for s, c in zip(self.shape, self.chunks))
for idx in np.ndindex(ceilings): # could also use itertools.product
out = []
for i, c, s in zip(idx, self.chunks, self.shape):
start = i * c
stop = min(start + c, s + 1)
out.append(slice(start, stop, 1))
yield tuple(out) |
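The chunk arithmetic is easiest to follow on a concrete shape; a standalone rerun of the same loop for shape (5, 4) with chunks (2, 3), NumPy assumed:

import numpy as np

shape, chunks = (5, 4), (2, 3)
ceilings = tuple(-(-s // c) for s, c in zip(shape, chunks))  # ceiling division -> (3, 2) chunk grid
for idx in np.ndindex(ceilings):
    out = []
    for i, c, s in zip(idx, chunks, shape):
        start = i * c
        stop = min(start + c, s + 1)  # same stop rule as the method above
        out.append(slice(start, stop, 1))
    print(tuple(out))
# first two tuples: (slice(0, 2, 1), slice(0, 3, 1)) and (slice(0, 2, 1), slice(3, 5, 1))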
def _radial_profile(autocorr, r_max, nbins=100):
r"""
Helper functions to calculate the radial profile of the autocorrelation
Masks the image in radial segments from the center and averages the values
The distance values are normalized and 100 bins are used as default.
Parameters
----------
autocorr : ND-array
The image of autocorrelation produced by FFT
r_max : int or float
The maximum radius in pixels to sum the image over
Returns
-------
result : named_tuple
A named tupling containing an array of ``bins`` of radial position
and an array of ``counts`` in each bin.
"""
if len(autocorr.shape) == 2:
adj = sp.reshape(autocorr.shape, [2, 1, 1])
inds = sp.indices(autocorr.shape) - adj/2
dt = sp.sqrt(inds[0]**2 + inds[1]**2)
elif len(autocorr.shape) == 3:
adj = sp.reshape(autocorr.shape, [3, 1, 1, 1])
inds = sp.indices(autocorr.shape) - adj/2
dt = sp.sqrt(inds[0]**2 + inds[1]**2 + inds[2]**2)
else:
raise Exception('Image dimensions must be 2 or 3')
bin_size = np.int(np.ceil(r_max/nbins))
bins = np.arange(bin_size, r_max, step=bin_size)
radial_sum = np.zeros_like(bins)
for i, r in enumerate(bins):
# Generate Radial Mask from dt using bins
mask = (dt <= r) * (dt > (r-bin_size))
radial_sum[i] = np.sum(autocorr[mask])/np.sum(mask)
# Return normalized bin and radially summed autoc
norm_autoc_radial = radial_sum/np.max(autocorr)
tpcf = namedtuple('two_point_correlation_function',
('distance', 'probability'))
return tpcf(bins, norm_autoc_radial) | r"""
Helper functions to calculate the radial profile of the autocorrelation
Masks the image in radial segments from the center and averages the values
The distance values are normalized and 100 bins are used as default.
Parameters
----------
autocorr : ND-array
The image of autocorrelation produced by FFT
r_max : int or float
The maximum radius in pixels to sum the image over
Returns
-------
result : named_tuple
A named tupling containing an array of ``bins`` of radial position
and an array of ``counts`` in each bin. | Below is the instruction that describes the task:
### Input:
r"""
Helper functions to calculate the radial profile of the autocorrelation
Masks the image in radial segments from the center and averages the values
The distance values are normalized and 100 bins are used as default.
Parameters
----------
autocorr : ND-array
The image of autocorrelation produced by FFT
r_max : int or float
The maximum radius in pixels to sum the image over
Returns
-------
result : named_tuple
A named tupling containing an array of ``bins`` of radial position
and an array of ``counts`` in each bin.
### Response:
def _radial_profile(autocorr, r_max, nbins=100):
r"""
Helper functions to calculate the radial profile of the autocorrelation
Masks the image in radial segments from the center and averages the values
The distance values are normalized and 100 bins are used as default.
Parameters
----------
autocorr : ND-array
The image of autocorrelation produced by FFT
r_max : int or float
The maximum radius in pixels to sum the image over
Returns
-------
result : named_tuple
A named tupling containing an array of ``bins`` of radial position
and an array of ``counts`` in each bin.
"""
if len(autocorr.shape) == 2:
adj = sp.reshape(autocorr.shape, [2, 1, 1])
inds = sp.indices(autocorr.shape) - adj/2
dt = sp.sqrt(inds[0]**2 + inds[1]**2)
elif len(autocorr.shape) == 3:
adj = sp.reshape(autocorr.shape, [3, 1, 1, 1])
inds = sp.indices(autocorr.shape) - adj/2
dt = sp.sqrt(inds[0]**2 + inds[1]**2 + inds[2]**2)
else:
raise Exception('Image dimensions must be 2 or 3')
bin_size = np.int(np.ceil(r_max/nbins))
bins = np.arange(bin_size, r_max, step=bin_size)
radial_sum = np.zeros_like(bins)
for i, r in enumerate(bins):
# Generate Radial Mask from dt using bins
mask = (dt <= r) * (dt > (r-bin_size))
radial_sum[i] = np.sum(autocorr[mask])/np.sum(mask)
# Return normalized bin and radially summed autoc
norm_autoc_radial = radial_sum/np.max(autocorr)
tpcf = namedtuple('two_point_correlation_function',
('distance', 'probability'))
return tpcf(bins, norm_autoc_radial) |
def _sumDiceRolls(self, rollList):
"""convert from dice roll structure to a single integer result"""
if isinstance(rollList, RollList):
self.rolls.append(rollList)
return rollList.sum()
else:
return rollList | convert from dice roll structure to a single integer result | Below is the instruction that describes the task:
### Input:
convert from dice roll structure to a single integer result
### Response:
def _sumDiceRolls(self, rollList):
"""convert from dice roll structure to a single integer result"""
if isinstance(rollList, RollList):
self.rolls.append(rollList)
return rollList.sum()
else:
return rollList |
def setType(self, polygonID, polygonType):
"""setType(string, string) -> None
Sets the (abstract) type of the polygon.
"""
self._connection._beginMessage(
tc.CMD_SET_POLYGON_VARIABLE, tc.VAR_TYPE, polygonID, 1 + 4 + len(polygonType))
self._connection._packString(polygonType)
self._connection._sendExact() | setType(string, string) -> None
Sets the (abstract) type of the polygon. | Below is the instruction that describes the task:
### Input:
setType(string, string) -> None
Sets the (abstract) type of the polygon.
### Response:
def setType(self, polygonID, polygonType):
"""setType(string, string) -> None
Sets the (abstract) type of the polygon.
"""
self._connection._beginMessage(
tc.CMD_SET_POLYGON_VARIABLE, tc.VAR_TYPE, polygonID, 1 + 4 + len(polygonType))
self._connection._packString(polygonType)
self._connection._sendExact() |
def pixel_to_utm(row, column, transform):
""" Convert pixel coordinate to UTM coordinate given a transform
:param row: row pixel coordinate
:type row: int or float
:param column: column pixel coordinate
:type column: int or float
:param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)`
:type transform: tuple or list
:return: east, north UTM coordinates
:rtype: float, float
"""
east = transform[0] + column * transform[1]
north = transform[3] + row * transform[5]
return east, north | Convert pixel coordinate to UTM coordinate given a transform
:param row: row pixel coordinate
:type row: int or float
:param column: column pixel coordinate
:type column: int or float
:param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)`
:type transform: tuple or list
:return: east, north UTM coordinates
:rtype: float, float | Below is the instruction that describes the task:
### Input:
Convert pixel coordinate to UTM coordinate given a transform
:param row: row pixel coordinate
:type row: int or float
:param column: column pixel coordinate
:type column: int or float
:param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)`
:type transform: tuple or list
:return: east, north UTM coordinates
:rtype: float, float
### Response:
def pixel_to_utm(row, column, transform):
""" Convert pixel coordinate to UTM coordinate given a transform
:param row: row pixel coordinate
:type row: int or float
:param column: column pixel coordinate
:type column: int or float
:param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)`
:type transform: tuple or list
:return: east, north UTM coordinates
:rtype: float, float
"""
east = transform[0] + column * transform[1]
north = transform[3] + row * transform[5]
return east, north |
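A worked example with a made-up 10 m ground-sampling transform (the numbers are illustrative, not from any real raster):

# (x_upper_left, res_x, 0, y_upper_left, 0, -res_y) for a 10 m pixel grid
transform = (600000.0, 10.0, 0, 5000000.0, 0, -10.0)

row, column = 20, 35
east = transform[0] + column * transform[1]   # 600000 + 35 * 10   = 600350.0
north = transform[3] + row * transform[5]     # 5000000 + 20 * -10 = 4999800.0
print(east, north)  # 600350.0 4999800.0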
def get_mipmap_pixel(
self, left: float, top: float, right: float, bottom: float
) -> Tuple[int, int, int]:
"""Get the average color of a rectangle in this Image.
Parameters should stay within the following limits:
* 0 <= left < right < Image.width
* 0 <= top < bottom < Image.height
Args:
left (float): Left corner of the region.
top (float): Top corner of the region.
right (float): Right corner of the region.
bottom (float): Bottom corner of the region.
Returns:
Tuple[int, int, int]:
An (r, g, b) tuple containing the averaged color value.
Values are in a 0 to 255 range.
"""
color = lib.TCOD_image_get_mipmap_pixel(
self.image_c, left, top, right, bottom
)
return (color.r, color.g, color.b) | Get the average color of a rectangle in this Image.
Parameters should stay within the following limits:
* 0 <= left < right < Image.width
* 0 <= top < bottom < Image.height
Args:
left (float): Left corner of the region.
top (float): Top corner of the region.
right (float): Right corner of the region.
bottom (float): Bottom corner of the region.
Returns:
Tuple[int, int, int]:
An (r, g, b) tuple containing the averaged color value.
Values are in a 0 to 255 range. | Below is the instruction that describes the task:
### Input:
Get the average color of a rectangle in this Image.
Parameters should stay within the following limits:
* 0 <= left < right < Image.width
* 0 <= top < bottom < Image.height
Args:
left (float): Left corner of the region.
top (float): Top corner of the region.
right (float): Right corner of the region.
bottom (float): Bottom corner of the region.
Returns:
Tuple[int, int, int]:
An (r, g, b) tuple containing the averaged color value.
Values are in a 0 to 255 range.
### Response:
def get_mipmap_pixel(
self, left: float, top: float, right: float, bottom: float
) -> Tuple[int, int, int]:
"""Get the average color of a rectangle in this Image.
Parameters should stay within the following limits:
* 0 <= left < right < Image.width
* 0 <= top < bottom < Image.height
Args:
left (float): Left corner of the region.
top (float): Top corner of the region.
right (float): Right corner of the region.
bottom (float): Bottom corner of the region.
Returns:
Tuple[int, int, int]:
An (r, g, b) tuple containing the averaged color value.
Values are in a 0 to 255 range.
"""
color = lib.TCOD_image_get_mipmap_pixel(
self.image_c, left, top, right, bottom
)
return (color.r, color.g, color.b) |
def get_query_results(self, query_execution_id):
"""
Fetch submitted athena query results. returns none if query is in intermediate state or
failed/cancelled state else dict of query output
:param query_execution_id: Id of submitted athena query
:type query_execution_id: str
:return: dict
"""
query_state = self.check_query_status(query_execution_id)
if query_state is None:
self.log.error('Invalid Query state')
return None
elif query_state in self.INTERMEDIATE_STATES or query_state in self.FAILURE_STATES:
self.log.error('Query is in {state} state. Cannot fetch results'.format(state=query_state))
return None
        return self.conn.get_query_results(QueryExecutionId=query_execution_id)

def get_debug_info():
"""Return a list of lines with backend info.
"""
from . import __version__
from .parser import SPEC_VERSION
d = OrderedDict()
d['Version'] = '%s' % __version__
d['Spec version'] = SPEC_VERSION
    return d

def update_list_positions_obj(client, positions_obj_id, revision, values):
'''
Updates the ordering of lists to have the given value. The given ID and revision should match the singleton object defining how lists are laid out.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated ListPositionsObj-mapped object defining the order of list layout
'''
    return _update_positions_obj(client, client.api.Endpoints.LIST_POSITIONS, positions_obj_id, revision, values)

def get_cpuid_leaf_by_ordinal(self, ordinal):
"""Used to enumerate CPUID information override values.
in ordinal of type int
The ordinal number of the leaf to get.
out idx of type int
CPUID leaf index.
out idx_sub of type int
CPUID leaf sub-index.
out val_eax of type int
CPUID leaf value for register eax.
out val_ebx of type int
CPUID leaf value for register ebx.
out val_ecx of type int
CPUID leaf value for register ecx.
out val_edx of type int
CPUID leaf value for register edx.
raises :class:`OleErrorInvalidarg`
Invalid ordinal number is out of range.
"""
if not isinstance(ordinal, baseinteger):
raise TypeError("ordinal can only be an instance of type baseinteger")
(idx, idx_sub, val_eax, val_ebx, val_ecx, val_edx) = self._call("getCPUIDLeafByOrdinal",
in_p=[ordinal])
        return (idx, idx_sub, val_eax, val_ebx, val_ecx, val_edx)

def combine_slices(self, slices, tensor_shape, device=None):
"""Turns a set of slices into a single tensor.
Args:
slices: list of tf.Tensor with length self.size.
tensor_shape: Shape.
device: optional str. If absent, we use the devices of the slices.
Returns:
tf.Tensor.
"""
if tensor_shape.ndims == 0:
return slices[0]
ret = slices[:]
tensor_layout = self.tensor_layout(tensor_shape)
for mesh_dim, tensor_axis in zip(
self.shape, tensor_layout.mesh_axis_to_tensor_axis(self.ndims)):
slice_size = len(ret) // mesh_dim.size
if tensor_axis is None:
ret = ret[:slice_size]
else:
if device:
devices = [device] * slice_size
else:
devices = [ret[i].device for i in xrange(slice_size)]
concat_inputs = []
for i in xrange(slice_size):
concat_inputs.append(
[ret[i + slice_size * j] for j in xrange(mesh_dim.size)])
ret = parallel(
devices, tf.concat, concat_inputs,
axis=[tensor_axis] * len(devices))
assert len(ret) == 1
    return ret[0]

def create(cls, schema, name):
"""
Create an object based on the root tag name.
@param schema: A schema object.
@type schema: L{schema.Schema}
@param name: The name.
@type name: str
@return: The created object.
@rtype: L{XBuiltin}
"""
fn = cls.tags.get(name, XBuiltin)
        return fn(schema, name)
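
# The classmethod above is a small tag-dispatch factory: look the tag up in a
# class-level mapping and fall back to XBuiltin. A self-contained sketch of the
# same pattern with made-up classes:
class Default:
    def __init__(self, schema, name):
        self.schema, self.name = schema, name

class IntType(Default):
    pass

class Factory:
    tags = {'int': IntType}

    @classmethod
    def create(cls, schema, name):
        # Unknown tags fall back to the Default class.
        fn = cls.tags.get(name, Default)
        return fn(schema, name)

print(type(Factory.create(None, 'int')).__name__)      # IntType
print(type(Factory.create(None, 'unknown')).__name__)  # Default
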
def to_shapely_polygon(self):
"""
Convert this polygon to a Shapely polygon.
Returns
-------
shapely.geometry.Polygon
The Shapely polygon matching this polygon's exterior.
"""
# load shapely lazily, which makes the dependency more optional
import shapely.geometry
        return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])
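
# Assuming Shapely is installed, the conversion simply hands the exterior
# points to shapely.geometry.Polygon; a direct illustration with a plain list:
import shapely.geometry

exterior = [(0.0, 0.0), (4.0, 0.0), (4.0, 3.0), (0.0, 3.0)]
poly = shapely.geometry.Polygon([(p[0], p[1]) for p in exterior])
print(poly.area)    # 12.0
print(poly.bounds)  # (0.0, 0.0, 4.0, 3.0)
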
def readU8(self, register):
"""Read an unsigned byte from the specified register."""
result = self._bus.read_byte_data(self._address, register) & 0xFF
self._logger.debug("Read 0x%02X from register 0x%02X",
result, register)
        return result
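
# A minimal sketch of how such a register read is exercised, using a stand-in
# for the I2C bus so it runs without hardware; the bus, address and register
# values are made up and the logging from the original class is omitted.
class FakeBus:
    def read_byte_data(self, address, register):
        return 0x1FF  # deliberately wider than one byte

class FakeDevice:
    def __init__(self, bus, address):
        self._bus, self._address = bus, address

    def readU8(self, register):
        # Mask to 8 bits, exactly as the driver above does.
        return self._bus.read_byte_data(self._address, register) & 0xFF

dev = FakeDevice(FakeBus(), address=0x48)
print(hex(dev.readU8(0x00)))  # 0xff
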
def data_indicators(self, indicators, entity_count):
"""Process Indicator data."""
data = []
# process indicator objects
for xid, indicator_data in indicators.items():
entity_count += 1
if isinstance(indicator_data, dict):
data.append(indicator_data)
else:
data.append(indicator_data.data)
del indicators[xid]
if entity_count >= self._batch_max_chunk:
break
        return data, entity_count

def run_followers(self, prompt):
"""
First caller adds a prompt to queue and
runs followers until there are no more
pending prompts.
Subsequent callers just add a prompt
to the queue, avoiding recursion.
"""
assert isinstance(prompt, Prompt)
# Put the prompt on the queue.
self.pending_prompts.put(prompt)
if self.iteration_lock.acquire(False):
start_time = time.time()
i = 0
try:
while True:
try:
prompt = self.pending_prompts.get(False)
except Empty:
break
else:
followers = self.system.followers[prompt.process_name]
for follower_name in followers:
follower = self.system.processes[follower_name]
follower.run(prompt)
i += 1
self.pending_prompts.task_done()
finally:
run_frequency = i / (time.time() - start_time)
# print(f"Run frequency: {run_frequency}")
                self.iteration_lock.release()
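
# The method above combines a queue with a non-blocking lock so re-entrant
# calls only enqueue work instead of recursing. A stripped-down, self-contained
# sketch of that pattern (no event-sourcing machinery involved):
import threading
from queue import Queue, Empty

class Runner:
    def __init__(self):
        self.pending = Queue()
        self.lock = threading.Lock()
        self.handled = []

    def run(self, item):
        self.pending.put(item)
        if self.lock.acquire(False):   # only the first caller drains the queue
            try:
                while True:
                    try:
                        item = self.pending.get(False)
                    except Empty:
                        break
                    self.handled.append(item)
                    self.pending.task_done()
            finally:
                self.lock.release()

runner = Runner()
runner.run('prompt-1')
runner.run('prompt-2')
print(runner.handled)  # ['prompt-1', 'prompt-2']
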
def revoke(self, role):
"""Remove a role from the entity.
:type role: str
:param role: The role to remove from the entity.
"""
if role in self.roles:
            self.roles.remove(role)

def toggle_comments(self):
"""
Toggles comments on the document selected lines.
:return: Method success.
:rtype: bool
"""
if not self.__comment_marker:
return True
cursor = self.textCursor()
if not cursor.hasSelection():
cursor.movePosition(QTextCursor.StartOfBlock)
line = foundations.strings.to_string(self.document().findBlockByNumber(cursor.blockNumber()).text())
if line.startswith(self.__comment_marker):
foundations.common.repeat(cursor.deleteChar, len(self.__comment_marker))
else:
cursor.insertText(self.__comment_marker)
else:
block = self.document().findBlock(cursor.selectionStart())
while True:
block_cursor = self.textCursor()
block_cursor.setPosition(block.position())
if foundations.strings.to_string(block.text()).startswith(self.__comment_marker):
foundations.common.repeat(block_cursor.deleteChar, len(self.__comment_marker))
else:
block_cursor.insertText(self.__comment_marker)
if block.contains(cursor.selectionEnd()):
break
block = block.next()
        return True
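
# Stripped of the Qt machinery, the behaviour is per-line comment toggling; a
# plain-text sketch of the same logic on a list of strings:
def toggle_comment_lines(lines, marker='#'):
    # Remove the marker where present, prepend it where absent.
    return [line[len(marker):] if line.startswith(marker) else marker + line
            for line in lines]

print(toggle_comment_lines(['x = 1', '#y = 2']))  # ['#x = 1', 'y = 2']
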
def plot_gallery(saveplot=False):
'''Make plot of colormaps and labels, like in the matplotlib
gallery.
:param saveplot=False: Whether to save the plot or not.
'''
from colorspacious import cspace_converter
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
x = np.linspace(0.0, 1.0, 256)
fig, axes = plt.subplots(nrows=int(len(cm.cmap_d)/2), ncols=1, figsize=(6, 12))
fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99, wspace=0.05)
for ax, cmapname in zip(axes, cm.cmapnames):
if '_r' in cmapname: # skip reversed versions for plot
continue
cmap = cm.cmap_d[cmapname] # get the colormap instance
rgb = cmap(x)[np.newaxis, :, :3]
# Find a good conversion to grayscale
jch = cspace_converter("sRGB1", "CAM02-UCS")(rgb) # Not sure why to use JCh instead so using this.
L = jch[0, :, 0]
L = np.float32(np.vstack((L, L, L)))
ax.imshow(gradient, aspect='auto', cmap=cmap)
pos1 = ax.get_position() # get the original position
pos2 = [pos1.x0, pos1.y0, pos1.width, pos1.height / 3.0]
axbw = fig.add_axes(pos2) # colorbar axes
axbw.set_axis_off()
axbw.imshow(L, aspect='auto', cmap=cm.gray, vmin=0, vmax=100.)
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3]/2.
fig.text(x_text, y_text, cmap.name, va='center', ha='right')
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
if saveplot:
fig.savefig('figures/gallery.pdf', bbox_inches='tight')
fig.savefig('figures/gallery.png', bbox_inches='tight')
    plt.show()

def snapshot(self, at):
""" Return a new Table which is a snapshot of this table at the specified time.
Args:
at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
        than seven days in the past. Passing None will get a reference to the oldest snapshot.
Note that using a datetime will get a snapshot at an absolute point in time, while
a timedelta will provide a varying snapshot; any queries issued against such a Table
will be done against a snapshot that has an age relative to the execution time of the
query.
Returns:
A new Table object referencing the snapshot.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
"""
if self._name_parts.decorator != '':
raise Exception("Cannot use snapshot() on an already decorated table")
value = Table._convert_decorator_time(at)
return Table("%s@%s" % (self._full_name, str(value)), context=self._context) | Return a new Table which is a snapshot of this table at the specified time.
Args:
at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past. Passing None will get a reference the oldest snapshot.
Note that using a datetime will get a snapshot at an absolute point in time, while
a timedelta will provide a varying snapshot; any queries issued against such a Table
will be done against a snapshot that has an age relative to the execution time of the
query.
Returns:
A new Table object referencing the snapshot.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid. | Below is the the instruction that describes the task:
### Input:
Return a new Table which is a snapshot of this table at the specified time.
Args:
at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past. Passing None will get a reference the oldest snapshot.
Note that using a datetime will get a snapshot at an absolute point in time, while
a timedelta will provide a varying snapshot; any queries issued against such a Table
will be done against a snapshot that has an age relative to the execution time of the
query.
Returns:
A new Table object referencing the snapshot.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
### Response:
def snapshot(self, at):
""" Return a new Table which is a snapshot of this table at the specified time.
Args:
at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past. Passing None will get a reference the oldest snapshot.
Note that using a datetime will get a snapshot at an absolute point in time, while
a timedelta will provide a varying snapshot; any queries issued against such a Table
will be done against a snapshot that has an age relative to the execution time of the
query.
Returns:
A new Table object referencing the snapshot.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
"""
if self._name_parts.decorator != '':
raise Exception("Cannot use snapshot() on an already decorated table")
value = Table._convert_decorator_time(at)
return Table("%s@%s" % (self._full_name, str(value)), context=self._context) |
def remove_node_attributes(G: nx.DiGraph, attributes: Union[str, Iterable[str]]):
"""
Return a copy of the graph with the given attributes
deleted from all nodes.
"""
G = G.copy()
for _, data in G.nodes(data=True):
for attribute in setwrap(attributes):
if attribute in data:
del data[attribute]
    return G
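
# Assuming networkx is installed (and that setwrap simply normalises a single
# attribute name or an iterable of names into a set), the effect is easy to
# see on a small graph; this sketch applies the same idea directly:
import networkx as nx

G = nx.DiGraph()
G.add_node('a', color='red', weight=3)
G.add_node('b', color='blue')

H = G.copy()
for _, data in H.nodes(data=True):
    data.pop('color', None)   # what remove_node_attributes(G, 'color') does

print(dict(G.nodes(data=True)))  # original keeps 'color'
print(dict(H.nodes(data=True)))  # copy has it removed
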
def split(str, pattern, limit=-1):
"""
Splits str around matches of the given pattern.
:param str: a string expression to split
:param pattern: a string representing a regular expression. The regex string should be
a Java regular expression.
:param limit: an integer which controls the number of times `pattern` is applied.
* ``limit > 0``: The resulting array's length will not be more than `limit`, and the
resulting array's last entry will contain all input beyond the last
matched pattern.
* ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting
array can be of any size.
.. versionchanged:: 3.0
`split` now takes an optional `limit` field. If not provided, default limit value is -1.
>>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',])
>>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect()
[Row(s=[u'one', u'twoBthreeC'])]
>>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect()
[Row(s=[u'one', u'two', u'three', u''])]
"""
sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.split(_to_java_column(str), pattern, limit))
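
# Outside of Spark, a rough pure-Python analogue of the docstring examples
# (Java and Python regex semantics differ in corner cases, but for this
# pattern the behaviour matches; limit > 0 corresponds to maxsplit = limit - 1):
import re

s = 'oneAtwoBthreeC'
print(re.split('[ABC]', s, maxsplit=1))  # ['one', 'twoBthreeC']       ~ limit=2
print(re.split('[ABC]', s))              # ['one', 'two', 'three', ''] ~ limit=-1
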
def _exclude_by_filter(self, frame, filename):
'''
:param str filename:
The filename to filter.
:return: True if it should be excluded, False if it should be included and None
if no rule matched the given file.
'''
try:
return self._exclude_by_filter_cache[filename]
except KeyError:
cache = self._exclude_by_filter_cache
abs_real_path_and_basename = get_abs_path_real_path_and_base_from_file(filename)
# pydevd files are always filtered out
if self.get_file_type(abs_real_path_and_basename) == self.PYDEV_FILE:
cache[filename] = True
else:
module_name = None
if self._files_filtering.require_module:
module_name = frame.f_globals.get('__name__')
cache[filename] = self._files_filtering.exclude_by_filter(filename, module_name)
            return cache[filename]

def read_headers(rfile, hdict=None):
"""Read headers from the given stream into the given header dict.
If hdict is None, a new header dict is created. Returns the populated
header dict.
Headers which are repeated are folded together using a comma if their
specification so dictates.
This function raises ValueError when the read bytes violate the HTTP spec.
You should probably return "400 Bad Request" if this happens.
"""
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
if line[0] in ' \t':
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(":", 1)
except ValueError:
raise ValueError("Illegal header line.")
# TODO: what about TE and WWW-Authenticate?
k = k.strip().title()
v = v.strip()
hname = k
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = ", ".join((existing, v))
hdict[hname] = v
    return hdict
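
# A hedged usage sketch, assuming the module-level CRLF constant is '\r\n' and
# that 'Accept' appears in comma_separated_headers (so repeated values fold):
import io

raw = ('Host: example.com\r\n'
       'Accept: text/html\r\n'
       'Accept: application/json\r\n'
       '\r\n')
headers = read_headers(io.StringIO(raw))
print(headers['Host'])    # 'example.com'
print(headers['Accept'])  # 'text/html, application/json'
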
def remove_device(self, device: Union[DeviceType, str]) -> None:
"""Remove the given |Node| or |Element| object from the actual
|Nodes| or |Elements| object.
You can pass either a string or a device:
>>> from hydpy import Node, Nodes
>>> nodes = Nodes('node_x', 'node_y')
>>> node_x, node_y = nodes
>>> nodes.remove_device(Node('node_y'))
>>> nodes
Nodes("node_x")
>>> nodes.remove_device(Node('node_x'))
>>> nodes
Nodes()
>>> nodes.remove_device(Node('node_z'))
Traceback (most recent call last):
...
ValueError: While trying to remove the device `node_z` from a \
Nodes object, the following error occurred: The actual Nodes object does \
not handle such a device.
Method |Devices.remove_device| is disabled for immutable |Nodes|
and |Elements| objects:
>>> nodes.mutable = False
>>> nodes.remove_device('node_z')
Traceback (most recent call last):
...
RuntimeError: While trying to remove the device `node_z` from a \
Nodes object, the following error occurred: Removing devices from \
immutable Nodes objects is not allowed.
"""
try:
if self.mutable:
_device = self.get_contentclass()(device)
try:
del self._name2device[_device.name]
except KeyError:
raise ValueError(
f'The actual {objecttools.classname(self)} '
f'object does not handle such a device.')
del _id2devices[_device][id(self)]
else:
raise RuntimeError(
f'Removing devices from immutable '
f'{objecttools.classname(self)} objects is not allowed.')
except BaseException:
objecttools.augment_excmessage(
f'While trying to remove the device `{device}` from a '
                f'{objecttools.classname(self)} object')

def _aggregate(self, source, aggregators, data, result):
"""Performs aggregation at a specific node in the data/aggregator tree."""
if data is None:
return
if hasattr(aggregators, 'items'):
# Keep walking the tree.
for key, value in six.iteritems(aggregators):
if isinstance(key, tuple):
key, regex = key
for dataKey, dataValue in six.iteritems(data):
if regex.match(dataKey):
result.setdefault(key, {})
self._aggregate(source, value, dataValue, result[key])
else:
if key == '*':
for dataKey, dataValue in six.iteritems(data):
result.setdefault(dataKey, {})
self._aggregate(source, value, dataValue, result[dataKey])
elif key in data:
result.setdefault(key, {})
self._aggregate(source, value, data[key], result[key])
else:
# We found a leaf.
for aggregator in aggregators:
if aggregator.name not in result:
result[aggregator.name] = aggregator.clone()
                result[aggregator.name].addValue(source, data)

def main():
'''Main routine.'''
# validate command line arguments
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--vmssname', '-n', required=True, action='store',
help='Scale set name')
arg_parser.add_argument('--rgname', '-g', required=True, action='store',
help='Resource Group Name')
arg_parser.add_argument('--operation', '-o', required=True, action='store',
help='Operation (attach/detach)')
arg_parser.add_argument('--vmid', '-i', required=True,
action='store', help='VM id')
arg_parser.add_argument('--lun', '-l', required=True,
action='store', help='lun id')
    arg_parser.add_argument('--diskname', '-d', required=False, action='store',
                            help='Disk name (required for attach)')
args = arg_parser.parse_args()
vmssname = args.vmssname
rgname = args.rgname
operation = args.operation
vmid = args.vmid
lun = int(args.lun)
diskname = args.diskname
if operation != 'attach' and operation != 'detach':
sys.exit('--operation must be attach or detach')
if diskname is None and operation == 'attach':
sys.exit('--diskname is required for attach operation.')
subscription_id = azurerm.get_subscription_from_cli()
# authenticate
access_token = azurerm.get_access_token_from_cli()
# do a get on the VM
vmssvm_model = azurerm.get_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid)
# check operation
if operation == 'attach':
new_model = attach_model(subscription_id, rgname, vmssvm_model, diskname, lun)
else:
if operation == 'detach':
new_model = detach_model(vmssvm_model, lun)
# do a put on the VM
rmreturn = azurerm.put_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid,
new_model)
if rmreturn.status_code != 201:
sys.exit('Error ' + str(rmreturn.status_code) +
' creating VM. ' + rmreturn.text)
    print(json.dumps(rmreturn, sort_keys=False, indent=2, separators=(',', ': ')))
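
# The script is driven from a shell with Azure CLI credentials already in
# place (get_access_token_from_cli relies on a prior `az login`). A
# representative invocation, with placeholder script and resource names:
#
#   python vmss_attach_detach.py --vmssname myvmss --rgname mygroup \
#       --operation attach --vmid 3 --lun 1 --diskname datadisk1
#   python vmss_attach_detach.py --vmssname myvmss --rgname mygroup \
#       --operation detach --vmid 3 --lun 1
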
def zero_pad(m, n=1):
"""Pad a matrix with zeros, on all sides."""
    return np.pad(m, (n, n), mode='constant', constant_values=[0])
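
# A quick check of the helper on a 2x2 matrix (requires numpy and assumes
# zero_pad above is in scope):
import numpy as np

m = np.array([[1, 2],
              [3, 4]])
print(zero_pad(m))
# [[0 0 0 0]
#  [0 1 2 0]
#  [0 3 4 0]
#  [0 0 0 0]]
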
def _validate_annotation(self, annotation):
'''Ensures that the annotation has the right fields.'''
required_keys = set(self._required_keys)
keys = set(key for key, val in annotation.items() if val)
missing_keys = required_keys.difference(keys)
if missing_keys:
error = 'Annotation missing required fields: {0}'.format(
missing_keys)
            raise AnnotationError(error)

def assert_has_calls(self, calls, any_order=False):
"""assert the mock has been called with the specified calls.
The `mock_calls` list is checked for the calls.
If `any_order` is False (the default) then the calls must be
sequential. There can be extra calls before or after the
specified calls.
If `any_order` is True then the calls can be in any order, but
they must all appear in `mock_calls`."""
if not any_order:
if calls not in self.mock_calls:
raise AssertionError(
'Calls not found.\nExpected: %r\n'
'Actual: %r' % (calls, self.mock_calls)
)
return
all_calls = list(self.mock_calls)
not_found = []
for kall in calls:
try:
all_calls.remove(kall)
except ValueError:
not_found.append(kall)
if not_found:
raise AssertionError(
'%r not all found in call list' % (tuple(not_found),)
            )
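
# The standard library's unittest.mock exposes assert_has_calls with the same
# semantics; a quick demonstration of both the ordered and any_order checks:
from unittest.mock import Mock, call

m = Mock()
m(1)
m(2)
m(3)

m.assert_has_calls([call(2), call(3)])                   # consecutive subsequence: passes
m.assert_has_calls([call(3), call(1)], any_order=True)   # order ignored: passes

try:
    m.assert_has_calls([call(3), call(2)])               # wrong order: raises
except AssertionError:
    print('AssertionError raised as expected')
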
def check_permissions(self, request):
""" Retrieves the controlled object and perform the permissions check. """
obj = (
hasattr(self, 'get_controlled_object') and self.get_controlled_object() or
hasattr(self, 'get_object') and self.get_object() or getattr(self, 'object', None)
)
user = request.user
# Get the permissions to check
perms = self.get_required_permissions(self)
# Check permissions
has_permissions = self.perform_permissions_check(user, obj, perms)
if not has_permissions and not user.is_authenticated:
return HttpResponseRedirect('{}?{}={}'.format(
resolve_url(self.login_url),
self.redirect_field_name,
urlquote(request.get_full_path())
))
elif not has_permissions:
            raise PermissionDenied

def plot_learning_curve(clf, X, y, title='Learning Curve', cv=None,
shuffle=False, random_state=None,
train_sizes=None, n_jobs=1, scoring=None,
ax=None, figsize=None, title_fontsize="large",
text_fontsize="medium"):
"""Generates a plot of the train and test learning curves for a classifier.
Args:
clf: Classifier instance that implements ``fit`` and ``predict``
methods.
X (array-like, shape (n_samples, n_features)):
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y (array-like, shape (n_samples) or (n_samples, n_features)):
Target relative to X for classification or regression;
None for unsupervised learning.
title (string, optional): Title of the generated plot. Defaults to
"Learning Curve"
cv (int, cross-validation generator, iterable, optional): Determines
the cross-validation strategy to be used for splitting.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is
used.
shuffle (bool, optional): Used when do_cv is set to True. Determines
whether to shuffle the training data before splitting using
cross-validation. Default set to True.
random_state (int :class:`RandomState`): Pseudo-random number generator
state used for random sampling.
train_sizes (iterable, optional): Determines the training sizes used to
plot the learning curve. If None, ``np.linspace(.1, 1.0, 5)`` is
used.
n_jobs (int, optional): Number of jobs to run in parallel. Defaults to
1.
scoring (string, callable or None, optional): default: None
A string (see scikit-learn model evaluation documentation) or a
scorerbcallable object / function with signature
scorer(estimator, X, y).
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot as skplt
>>> rf = RandomForestClassifier()
>>> skplt.estimators.plot_learning_curve(rf, X, y)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_learning_curve.png
:align: center
:alt: Learning Curve
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
if train_sizes is None:
train_sizes = np.linspace(.1, 1.0, 5)
ax.set_title(title, fontsize=title_fontsize)
ax.set_xlabel("Training examples", fontsize=text_fontsize)
ax.set_ylabel("Score", fontsize=text_fontsize)
train_sizes, train_scores, test_scores = learning_curve(
clf, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes,
scoring=scoring, shuffle=shuffle, random_state=random_state)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax.grid()
ax.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1, color="r")
ax.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
ax.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
ax.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc="best", fontsize=text_fontsize)
return ax | Generates a plot of the train and test learning curves for a classifier.
Args:
clf: Classifier instance that implements ``fit`` and ``predict``
methods.
X (array-like, shape (n_samples, n_features)):
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y (array-like, shape (n_samples) or (n_samples, n_features)):
Target relative to X for classification or regression;
None for unsupervised learning.
title (string, optional): Title of the generated plot. Defaults to
"Learning Curve"
cv (int, cross-validation generator, iterable, optional): Determines
the cross-validation strategy to be used for splitting.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is
used.
shuffle (bool, optional): Used when do_cv is set to True. Determines
whether to shuffle the training data before splitting using
cross-validation. Default set to True.
random_state (int :class:`RandomState`): Pseudo-random number generator
state used for random sampling.
train_sizes (iterable, optional): Determines the training sizes used to
plot the learning curve. If None, ``np.linspace(.1, 1.0, 5)`` is
used.
n_jobs (int, optional): Number of jobs to run in parallel. Defaults to
1.
scoring (string, callable or None, optional): default: None
A string (see scikit-learn model evaluation documentation) or a
scorerbcallable object / function with signature
scorer(estimator, X, y).
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot as skplt
>>> rf = RandomForestClassifier()
>>> skplt.estimators.plot_learning_curve(rf, X, y)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_learning_curve.png
:align: center
    :alt: Learning Curve | Below is the instruction that describes the task:
### Input:
Generates a plot of the train and test learning curves for a classifier.
Args:
clf: Classifier instance that implements ``fit`` and ``predict``
methods.
X (array-like, shape (n_samples, n_features)):
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y (array-like, shape (n_samples) or (n_samples, n_features)):
Target relative to X for classification or regression;
None for unsupervised learning.
title (string, optional): Title of the generated plot. Defaults to
"Learning Curve"
cv (int, cross-validation generator, iterable, optional): Determines
the cross-validation strategy to be used for splitting.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is
used.
    shuffle (bool, optional): Determines whether to shuffle the training data
        before splitting using cross-validation. Defaults to False.
    random_state (int or :class:`RandomState`): Pseudo-random number generator
state used for random sampling.
train_sizes (iterable, optional): Determines the training sizes used to
plot the learning curve. If None, ``np.linspace(.1, 1.0, 5)`` is
used.
n_jobs (int, optional): Number of jobs to run in parallel. Defaults to
1.
scoring (string, callable or None, optional): default: None
A string (see scikit-learn model evaluation documentation) or a
        scorer callable object / function with signature
scorer(estimator, X, y).
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot as skplt
>>> rf = RandomForestClassifier()
>>> skplt.estimators.plot_learning_curve(rf, X, y)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_learning_curve.png
:align: center
:alt: Learning Curve
### Response:
def plot_learning_curve(clf, X, y, title='Learning Curve', cv=None,
shuffle=False, random_state=None,
train_sizes=None, n_jobs=1, scoring=None,
ax=None, figsize=None, title_fontsize="large",
text_fontsize="medium"):
"""Generates a plot of the train and test learning curves for a classifier.
Args:
clf: Classifier instance that implements ``fit`` and ``predict``
methods.
X (array-like, shape (n_samples, n_features)):
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y (array-like, shape (n_samples) or (n_samples, n_features)):
Target relative to X for classification or regression;
None for unsupervised learning.
title (string, optional): Title of the generated plot. Defaults to
"Learning Curve"
cv (int, cross-validation generator, iterable, optional): Determines
the cross-validation strategy to be used for splitting.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
            :class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is
used.
        shuffle (bool, optional): Determines whether to shuffle the training data
            before splitting using cross-validation. Defaults to False.
        random_state (int or :class:`RandomState`): Pseudo-random number generator
state used for random sampling.
train_sizes (iterable, optional): Determines the training sizes used to
plot the learning curve. If None, ``np.linspace(.1, 1.0, 5)`` is
used.
n_jobs (int, optional): Number of jobs to run in parallel. Defaults to
1.
scoring (string, callable or None, optional): default: None
A string (see scikit-learn model evaluation documentation) or a
            scorer callable object / function with signature
scorer(estimator, X, y).
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot as skplt
>>> rf = RandomForestClassifier()
>>> skplt.estimators.plot_learning_curve(rf, X, y)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_learning_curve.png
:align: center
:alt: Learning Curve
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
if train_sizes is None:
train_sizes = np.linspace(.1, 1.0, 5)
ax.set_title(title, fontsize=title_fontsize)
ax.set_xlabel("Training examples", fontsize=text_fontsize)
ax.set_ylabel("Score", fontsize=text_fontsize)
train_sizes, train_scores, test_scores = learning_curve(
clf, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes,
scoring=scoring, shuffle=shuffle, random_state=random_state)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax.grid()
ax.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1, color="r")
ax.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
ax.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
ax.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc="best", fontsize=text_fontsize)
return ax |
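For readers who want to run the docstring example end to end, here is a minimal self-contained sketch; the iris dataset and the RandomForestClassifier settings are illustrative choices, not part of the snippet above.

import matplotlib.pyplot as plt
import scikitplot as skplt
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

# Illustrative data and estimator; any classifier with fit/predict works.
X, y = load_iris(return_X_y=True)
rf = RandomForestClassifier(n_estimators=50, random_state=0)

# Draws the train and cross-validation score curves and returns the Axes.
ax = skplt.estimators.plot_learning_curve(rf, X, y, cv=5, shuffle=True, random_state=0)
plt.show()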
def pi_revision():
"""Detect the revision number of a Raspberry Pi, useful for changing
functionality like default I2C bus based on revision."""
# Revision list available at: http://elinux.org/RPi_HardwareHistory#Board_Revision_History
with open('/proc/cpuinfo', 'r') as infile:
for line in infile:
# Match a line of the form "Revision : 0002" while ignoring extra
            # info in front of the revision (like 1000 when the Pi was over-volted).
            match = re.match(r'Revision\s+:\s+.*(\w{4})$', line, flags=re.IGNORECASE)
if match and match.group(1) in ['0000', '0002', '0003']:
# Return revision 1 if revision ends with 0000, 0002 or 0003.
return 1
elif match:
# Assume revision 2 if revision ends with any other 4 chars.
return 2
# Couldn't find the revision, throw an exception.
raise RuntimeError('Could not determine Raspberry Pi revision.') | Detect the revision number of a Raspberry Pi, useful for changing
    functionality like default I2C bus based on revision. | Below is the instruction that describes the task:
### Input:
Detect the revision number of a Raspberry Pi, useful for changing
functionality like default I2C bus based on revision.
### Response:
def pi_revision():
"""Detect the revision number of a Raspberry Pi, useful for changing
functionality like default I2C bus based on revision."""
# Revision list available at: http://elinux.org/RPi_HardwareHistory#Board_Revision_History
with open('/proc/cpuinfo', 'r') as infile:
for line in infile:
# Match a line of the form "Revision : 0002" while ignoring extra
            # info in front of the revision (like 1000 when the Pi was over-volted).
            match = re.match(r'Revision\s+:\s+.*(\w{4})$', line, flags=re.IGNORECASE)
if match and match.group(1) in ['0000', '0002', '0003']:
# Return revision 1 if revision ends with 0000, 0002 or 0003.
return 1
elif match:
# Assume revision 2 if revision ends with any other 4 chars.
return 2
# Couldn't find the revision, throw an exception.
raise RuntimeError('Could not determine Raspberry Pi revision.') |
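A small hedged sketch of how the result is typically consumed, as the docstring hints: revision-1 boards expose the default I2C peripheral on bus 0, later boards on bus 1. The helper name below is illustrative and not part of the original module.

def pi_i2c_bus_number():
    # Original revision-1 boards use I2C bus 0; all later revisions use bus 1.
    return 0 if pi_revision() == 1 else 1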
def stop(self):
"""Stop serving. Always call this to clean up after yourself."""
self._stopped = True
threads = [self._accept_thread]
threads.extend(self._server_threads)
self._listening_sock.close()
for sock in list(self._server_socks):
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
try:
sock.close()
except socket.error:
pass
with self._unlock():
for thread in threads:
thread.join(10)
if self._uds_path:
try:
os.unlink(self._uds_path)
except OSError:
                pass | Stop serving. Always call this to clean up after yourself. | Below is the instruction that describes the task:
### Input:
Stop serving. Always call this to clean up after yourself.
### Response:
def stop(self):
"""Stop serving. Always call this to clean up after yourself."""
self._stopped = True
threads = [self._accept_thread]
threads.extend(self._server_threads)
self._listening_sock.close()
for sock in list(self._server_socks):
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
try:
sock.close()
except socket.error:
pass
with self._unlock():
for thread in threads:
thread.join(10)
if self._uds_path:
try:
os.unlink(self._uds_path)
except OSError:
pass |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extract dictionaries of coefficients specific to required
# intensity measure type and for PGA
C = self.COEFFS[imt]
# For inslab GMPEs the correction term is fixed at -0.3
dc1 = -0.3
C_PGA = self.COEFFS[PGA()]
# compute median pga on rock (vs30=1000), needed for site response
# term calculation
pga1000 = np.exp(
self._compute_pga_rock(C_PGA, dc1, sites, rup, dists))
mean = (self._compute_magnitude_term(C, dc1, rup.mag) +
self._compute_distance_term(C, rup.mag, dists) +
self._compute_focal_depth_term(C, rup) +
self._compute_forearc_backarc_term(C, sites, dists) +
self._compute_site_response_term(C, sites, pga1000))
stddevs = self._get_stddevs(C, stddev_types, len(sites.vs30))
return mean, stddevs | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values. | Below is the instruction that describes the task:
### Input:
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
### Response:
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extract dictionaries of coefficients specific to required
# intensity measure type and for PGA
C = self.COEFFS[imt]
# For inslab GMPEs the correction term is fixed at -0.3
dc1 = -0.3
C_PGA = self.COEFFS[PGA()]
# compute median pga on rock (vs30=1000), needed for site response
# term calculation
pga1000 = np.exp(
self._compute_pga_rock(C_PGA, dc1, sites, rup, dists))
mean = (self._compute_magnitude_term(C, dc1, rup.mag) +
self._compute_distance_term(C, rup.mag, dists) +
self._compute_focal_depth_term(C, rup) +
self._compute_forearc_backarc_term(C, sites, dists) +
self._compute_site_response_term(C, sites, pga1000))
stddevs = self._get_stddevs(C, stddev_types, len(sites.vs30))
return mean, stddevs |
def fill_binop(left, right, fill_value):
"""
If a non-None fill_value is given, replace null entries in left and right
with this value, but only in positions where _one_ of left/right is null,
not both.
Parameters
----------
left : array-like
right : array-like
fill_value : object
Returns
-------
left : array-like
right : array-like
Notes
-----
Makes copies if fill_value is not None
"""
# TODO: can we make a no-copy implementation?
if fill_value is not None:
left_mask = isna(left)
right_mask = isna(right)
left = left.copy()
right = right.copy()
# one but not both
mask = left_mask ^ right_mask
left[left_mask & mask] = fill_value
right[right_mask & mask] = fill_value
return left, right | If a non-None fill_value is given, replace null entries in left and right
with this value, but only in positions where _one_ of left/right is null,
not both.
Parameters
----------
left : array-like
right : array-like
fill_value : object
Returns
-------
left : array-like
right : array-like
Notes
-----
    Makes copies if fill_value is not None | Below is the instruction that describes the task:
### Input:
If a non-None fill_value is given, replace null entries in left and right
with this value, but only in positions where _one_ of left/right is null,
not both.
Parameters
----------
left : array-like
right : array-like
fill_value : object
Returns
-------
left : array-like
right : array-like
Notes
-----
Makes copies if fill_value is not None
### Response:
def fill_binop(left, right, fill_value):
"""
If a non-None fill_value is given, replace null entries in left and right
with this value, but only in positions where _one_ of left/right is null,
not both.
Parameters
----------
left : array-like
right : array-like
fill_value : object
Returns
-------
left : array-like
right : array-like
Notes
-----
Makes copies if fill_value is not None
"""
# TODO: can we make a no-copy implementation?
if fill_value is not None:
left_mask = isna(left)
right_mask = isna(right)
left = left.copy()
right = right.copy()
# one but not both
mask = left_mask ^ right_mask
left[left_mask & mask] = fill_value
right[right_mask & mask] = fill_value
return left, right |
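A tiny worked example of the one-sided fill behaviour, assuming the fill_binop above and pandas' isna are importable in scope; the concrete Series values are illustrative.

import numpy as np
import pandas as pd

left = pd.Series([1.0, np.nan, np.nan])
right = pd.Series([np.nan, 2.0, np.nan])
left, right = fill_binop(left, right, fill_value=0.0)
# Positions where exactly one side was null get the fill value;
# the position where both sides were null stays NaN on both sides.
# left  -> [1.0, 0.0, nan]
# right -> [0.0, 2.0, nan]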
def get_graph_data(self, graph, benchmark):
"""
Iterator over graph data sets
Yields
------
param_idx
Flat index to parameter permutations for parameterized benchmarks.
None if benchmark is not parameterized.
entry_name
Name for the data set. If benchmark is non-parameterized, this is the
benchmark name.
steps
Steps to consider in regression detection.
threshold
User-specified threshold for regression detection.
"""
if benchmark.get('params'):
param_iter = enumerate(zip(itertools.product(*benchmark['params']),
graph.get_steps()))
else:
param_iter = [(None, (None, graph.get_steps()))]
for j, (param, steps) in param_iter:
if param is None:
entry_name = benchmark['name']
else:
entry_name = benchmark['name'] + '({0})'.format(', '.join(param))
start_revision = self._get_start_revision(graph, benchmark, entry_name)
threshold = self._get_threshold(graph, benchmark, entry_name)
if start_revision is None:
# Skip detection
continue
steps = [step for step in steps if step[1] >= start_revision]
yield j, entry_name, steps, threshold | Iterator over graph data sets
Yields
------
param_idx
Flat index to parameter permutations for parameterized benchmarks.
None if benchmark is not parameterized.
entry_name
Name for the data set. If benchmark is non-parameterized, this is the
benchmark name.
steps
Steps to consider in regression detection.
threshold
        User-specified threshold for regression detection. | Below is the instruction that describes the task:
### Input:
Iterator over graph data sets
Yields
------
param_idx
Flat index to parameter permutations for parameterized benchmarks.
None if benchmark is not parameterized.
entry_name
Name for the data set. If benchmark is non-parameterized, this is the
benchmark name.
steps
Steps to consider in regression detection.
threshold
User-specified threshold for regression detection.
### Response:
def get_graph_data(self, graph, benchmark):
"""
Iterator over graph data sets
Yields
------
param_idx
Flat index to parameter permutations for parameterized benchmarks.
None if benchmark is not parameterized.
entry_name
Name for the data set. If benchmark is non-parameterized, this is the
benchmark name.
steps
Steps to consider in regression detection.
threshold
User-specified threshold for regression detection.
"""
if benchmark.get('params'):
param_iter = enumerate(zip(itertools.product(*benchmark['params']),
graph.get_steps()))
else:
param_iter = [(None, (None, graph.get_steps()))]
for j, (param, steps) in param_iter:
if param is None:
entry_name = benchmark['name']
else:
entry_name = benchmark['name'] + '({0})'.format(', '.join(param))
start_revision = self._get_start_revision(graph, benchmark, entry_name)
threshold = self._get_threshold(graph, benchmark, entry_name)
if start_revision is None:
# Skip detection
continue
steps = [step for step in steps if step[1] >= start_revision]
yield j, entry_name, steps, threshold |
def count(self):
"""
Return a new DStream in which each RDD has a single element
generated by counting each RDD of this DStream.
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).reduce(operator.add) | Return a new DStream in which each RDD has a single element
        generated by counting each RDD of this DStream. | Below is the instruction that describes the task:
### Input:
Return a new DStream in which each RDD has a single element
generated by counting each RDD of this DStream.
### Response:
def count(self):
"""
Return a new DStream in which each RDD has a single element
generated by counting each RDD of this DStream.
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).reduce(operator.add) |
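A brief usage sketch in a streaming job; the socket source, host/port, and one-second batch interval are illustrative.

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext(appName="CountExample")
ssc = StreamingContext(sc, 1)  # 1-second batches

lines = ssc.socketTextStream("localhost", 9999)
# Each batch produces an RDD with a single element: that batch's record count.
lines.count().pprint()

ssc.start()
ssc.awaitTermination()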
def simple_predictive_probability_multistate(
self, M_c, X_L_list, X_D_list, Y, Q):
"""Calculate probability of a cell taking a value given a latent state.
:param Y: A list of constraints to apply when querying. Each constraint
is a triplet of (r,d,v): r is the row index, d is the column
index and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to query. Each value is triplet of (r,d,v):
r is the row index, d is the column index, and v is the value at
which the density is evaluated.
:type Q: list of lists
:returns: list of floats -- probabilities of the values specified by Q
"""
return su.simple_predictive_probability_multistate(
M_c, X_L_list, X_D_list, Y, Q) | Calculate probability of a cell taking a value given a latent state.
:param Y: A list of constraints to apply when querying. Each constraint
is a triplet of (r,d,v): r is the row index, d is the column
index and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to query. Each value is triplet of (r,d,v):
r is the row index, d is the column index, and v is the value at
which the density is evaluated.
:type Q: list of lists
    :returns: list of floats -- probabilities of the values specified by Q | Below is the instruction that describes the task:
### Input:
Calculate probability of a cell taking a value given a latent state.
:param Y: A list of constraints to apply when querying. Each constraint
is a triplet of (r,d,v): r is the row index, d is the column
index and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to query. Each value is triplet of (r,d,v):
r is the row index, d is the column index, and v is the value at
which the density is evaluated.
:type Q: list of lists
:returns: list of floats -- probabilities of the values specified by Q
### Response:
def simple_predictive_probability_multistate(
self, M_c, X_L_list, X_D_list, Y, Q):
"""Calculate probability of a cell taking a value given a latent state.
:param Y: A list of constraints to apply when querying. Each constraint
is a triplet of (r,d,v): r is the row index, d is the column
index and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to query. Each value is triplet of (r,d,v):
r is the row index, d is the column index, and v is the value at
which the density is evaluated.
:type Q: list of lists
:returns: list of floats -- probabilities of the values specified by Q
"""
return su.simple_predictive_probability_multistate(
M_c, X_L_list, X_D_list, Y, Q) |
def pdf(cls, mass, log_mode=True):
""" PDF for the Kroupa IMF.
Normalization is set over the mass range from 0.1 Msun to 100 Msun
"""
log_mass = np.log10(mass)
# From Eq 2
mb = mbreak = [0.08, 0.5] # Msun
a = alpha = [0.3, 1.3, 2.3] # alpha
# Normalization set from 0.1 -- 100 Msun
norm = 0.27947743949440446
b = 1./norm
c = b * mbreak[0]**(alpha[1]-alpha[0])
d = c * mbreak[1]**(alpha[2]-alpha[1])
dn_dm = b * (mass < 0.08) * mass**(-alpha[0])
dn_dm += c * (0.08 <= mass) * (mass < 0.5) * mass**(-alpha[1])
dn_dm += d * (0.5 <= mass) * mass**(-alpha[2])
if log_mode:
# Number per logarithmic mass range, i.e., dN/dlog(M)
return dn_dm * (mass * np.log(10))
else:
# Number per linear mass range, i.e., dN/dM
return dn_dm | PDF for the Kroupa IMF.
    Normalization is set over the mass range from 0.1 Msun to 100 Msun | Below is the instruction that describes the task:
### Input:
PDF for the Kroupa IMF.
Normalization is set over the mass range from 0.1 Msun to 100 Msun
### Response:
def pdf(cls, mass, log_mode=True):
""" PDF for the Kroupa IMF.
Normalization is set over the mass range from 0.1 Msun to 100 Msun
"""
log_mass = np.log10(mass)
# From Eq 2
mb = mbreak = [0.08, 0.5] # Msun
a = alpha = [0.3, 1.3, 2.3] # alpha
# Normalization set from 0.1 -- 100 Msun
norm = 0.27947743949440446
b = 1./norm
c = b * mbreak[0]**(alpha[1]-alpha[0])
d = c * mbreak[1]**(alpha[2]-alpha[1])
dn_dm = b * (mass < 0.08) * mass**(-alpha[0])
dn_dm += c * (0.08 <= mass) * (mass < 0.5) * mass**(-alpha[1])
dn_dm += d * (0.5 <= mass) * mass**(-alpha[2])
if log_mode:
# Number per logarithmic mass range, i.e., dN/dlog(M)
return dn_dm * (mass * np.log(10))
else:
# Number per linear mass range, i.e., dN/dM
return dn_dm |
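As a sanity check on the quoted normalisation constant, the linear-mode density should integrate to roughly 1 over 0.1-100 Msun. The sketch below assumes the method above is exposed as a classmethod on a class named Kroupa (the name is illustrative) and uses scipy quadrature.

from scipy.integrate import quad

# Integrate dN/dM over the normalisation range; expect a value very close to 1.
total, _ = quad(lambda m: Kroupa.pdf(m, log_mode=False), 0.1, 100, points=[0.08, 0.5])
print(total)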
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
        return self._submit(params, files, _extra_params=_extra_params) | Submit a cookbook. | Below is the instruction that describes the task:
### Input:
Submit a cookbook.
### Response:
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params) |
def _merge_ex_dicts(sexdict, oexdict):
"""Merge callable look-up tables from two objects."""
# pylint: disable=R0101
for okey, ovalue in oexdict.items():
# Keys in other dictionary are callable paths or
# callables ids depending on the full_cname argument
# of the handler
if okey in sexdict:
svalue = sexdict[okey]
# Each callable dictionary entry is itself a dictionary where the
# key is (exception type, exception message) and the value is a
# dictionary with keys 'function' (a list of callable paths to
# the function), 'raised' (list of booleans indicating whether the
# exception though each callable path was raised or not) and name
for fkey, fvalue in ovalue.items():
if fkey not in svalue:
# The other dictionary has a callable path not present in
# the self dictionary, add it unmodified
sexdict[okey][fkey] = fvalue
else:
# The other dictionary has a callable and exception
# present in the self dictionary, have to check whether
# the callable path is the same (in which case update
# raise flag) or not (in which case add to 'function' and
# 'raise' keys
iobj = zip(fvalue["function"], fvalue["raised"])
for func, raised in iobj:
if func not in sexdict[okey][fkey]["function"]:
sexdict[okey][fkey]["function"].append(func)
sexdict[okey][fkey]["raised"].append(raised)
else:
idx = sexdict[okey][fkey]["function"].index(func)
sraised = sexdict[okey][fkey]["raised"][idx]
if sraised or raised:
sexdict[okey][fkey]["raised"][idx] = True
else:
            sexdict[okey] = ovalue | Merge callable look-up tables from two objects. | Below is the instruction that describes the task:
### Input:
Merge callable look-up tables from two objects.
### Response:
def _merge_ex_dicts(sexdict, oexdict):
"""Merge callable look-up tables from two objects."""
# pylint: disable=R0101
for okey, ovalue in oexdict.items():
# Keys in other dictionary are callable paths or
# callables ids depending on the full_cname argument
# of the handler
if okey in sexdict:
svalue = sexdict[okey]
# Each callable dictionary entry is itself a dictionary where the
# key is (exception type, exception message) and the value is a
# dictionary with keys 'function' (a list of callable paths to
# the function), 'raised' (list of booleans indicating whether the
# exception though each callable path was raised or not) and name
for fkey, fvalue in ovalue.items():
if fkey not in svalue:
# The other dictionary has a callable path not present in
# the self dictionary, add it unmodified
sexdict[okey][fkey] = fvalue
else:
# The other dictionary has a callable and exception
# present in the self dictionary, have to check whether
# the callable path is the same (in which case update
# raise flag) or not (in which case add to 'function' and
# 'raise' keys
iobj = zip(fvalue["function"], fvalue["raised"])
for func, raised in iobj:
if func not in sexdict[okey][fkey]["function"]:
sexdict[okey][fkey]["function"].append(func)
sexdict[okey][fkey]["raised"].append(raised)
else:
idx = sexdict[okey][fkey]["function"].index(func)
sraised = sexdict[okey][fkey]["raised"][idx]
if sraised or raised:
sexdict[okey][fkey]["raised"][idx] = True
else:
sexdict[okey] = ovalue |
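A minimal illustrative merge showing the three cases the comments describe; the keys and callable paths are made up, and the per-entry dicts are simplified to just the 'function' and 'raised' fields.

sexdict = {
    'pkg.mod.func': {
        ('TypeError', 'bad arg'): {'function': ['a.b'], 'raised': [False]},
    },
}
oexdict = {
    'pkg.mod.func': {
        ('TypeError', 'bad arg'): {'function': ['a.b', 'c.d'], 'raised': [True, False]},
    },
    'pkg.mod.other': {
        ('ValueError', 'oops'): {'function': ['e.f'], 'raised': [True]},
    },
}
_merge_ex_dicts(sexdict, oexdict)
# 'a.b' keeps a single entry but its raised flag is OR-ed to True,
# 'c.d' is appended as a new callable path, and
# 'pkg.mod.other' is copied over unchanged.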
def load(target:str, namespace:str=None, default=nodefault, executable:bool=False, separators:Sequence[str]=('.', ':'),
protect:bool=True):
"""This helper function loads an object identified by a dotted-notation string.
For example::
# Load class Foo from example.objects
load('example.objects:Foo')
# Load the result of the class method ``new`` of the Foo object
load('example.objects:Foo.new', executable=True)
If a plugin namespace is provided simple name references are allowed. For example::
# Load the plugin named 'routing' from the 'web.dispatch' namespace
load('routing', 'web.dispatch')
The ``executable``, ``protect``, and first tuple element of ``separators`` are passed to the traverse function.
Providing a namespace does not prevent full object lookup (dot-colon notation) from working.
"""
assert check_argument_types()
if namespace and ':' not in target:
allowable = dict((i.name, i) for i in iter_entry_points(namespace))
if target not in allowable:
raise LookupError('Unknown plugin "' + target + '"; found: ' + ', '.join(allowable))
return allowable[target].load()
parts, _, target = target.partition(separators[1])
try:
obj = __import__(parts)
except ImportError:
if default is not nodefault:
return default
raise
return traverse(
obj,
separators[0].join(parts.split(separators[0])[1:] + target.split(separators[0])),
default = default,
executable = executable,
protect = protect
) if target else obj | This helper function loads an object identified by a dotted-notation string.
For example::
# Load class Foo from example.objects
load('example.objects:Foo')
# Load the result of the class method ``new`` of the Foo object
load('example.objects:Foo.new', executable=True)
If a plugin namespace is provided simple name references are allowed. For example::
# Load the plugin named 'routing' from the 'web.dispatch' namespace
load('routing', 'web.dispatch')
The ``executable``, ``protect``, and first tuple element of ``separators`` are passed to the traverse function.
    Providing a namespace does not prevent full object lookup (dot-colon notation) from working. | Below is the instruction that describes the task:
### Input:
This helper function loads an object identified by a dotted-notation string.
For example::
# Load class Foo from example.objects
load('example.objects:Foo')
# Load the result of the class method ``new`` of the Foo object
load('example.objects:Foo.new', executable=True)
If a plugin namespace is provided simple name references are allowed. For example::
# Load the plugin named 'routing' from the 'web.dispatch' namespace
load('routing', 'web.dispatch')
The ``executable``, ``protect``, and first tuple element of ``separators`` are passed to the traverse function.
Providing a namespace does not prevent full object lookup (dot-colon notation) from working.
### Response:
def load(target:str, namespace:str=None, default=nodefault, executable:bool=False, separators:Sequence[str]=('.', ':'),
protect:bool=True):
"""This helper function loads an object identified by a dotted-notation string.
For example::
# Load class Foo from example.objects
load('example.objects:Foo')
# Load the result of the class method ``new`` of the Foo object
load('example.objects:Foo.new', executable=True)
If a plugin namespace is provided simple name references are allowed. For example::
# Load the plugin named 'routing' from the 'web.dispatch' namespace
load('routing', 'web.dispatch')
The ``executable``, ``protect``, and first tuple element of ``separators`` are passed to the traverse function.
Providing a namespace does not prevent full object lookup (dot-colon notation) from working.
"""
assert check_argument_types()
if namespace and ':' not in target:
allowable = dict((i.name, i) for i in iter_entry_points(namespace))
if target not in allowable:
raise LookupError('Unknown plugin "' + target + '"; found: ' + ', '.join(allowable))
return allowable[target].load()
parts, _, target = target.partition(separators[1])
try:
obj = __import__(parts)
except ImportError:
if default is not nodefault:
return default
raise
return traverse(
obj,
separators[0].join(parts.split(separators[0])[1:] + target.split(separators[0])),
default = default,
executable = executable,
protect = protect
) if target else obj |
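One more usage sketch beyond the docstring examples, showing the default fallback; it assumes the load function above is in scope, and the module path is deliberately bogus to trigger the ImportError branch.

# Returns the fallback instead of raising when the module cannot be imported.
handler = load('definitely.not.a.module:Handler', default=None)
assert handler is None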
def assemble(
cls,
args,
input_tube,
output_tubes,
size,
disable_result,
do_stop_task,
):
"""Create, assemble and start workers.
Workers are created of class *cls*, initialized with *args*, and given
task/result communication channels *input_tube* and *output_tubes*.
The number of workers created is according to *size* parameter.
*do_stop_task* indicates whether doTask() will be called for "stop" request.
"""
# Create the workers.
workers = []
for ii in range(size):
worker = cls(**args)
worker.init2(
input_tube,
output_tubes,
size,
disable_result,
do_stop_task,
)
workers.append(worker)
# Start the workers.
for worker in workers:
worker.start() | Create, assemble and start workers.
Workers are created of class *cls*, initialized with *args*, and given
task/result communication channels *input_tube* and *output_tubes*.
The number of workers created is according to *size* parameter.
    *do_stop_task* indicates whether doTask() will be called for "stop" request. | Below is the instruction that describes the task:
### Input:
Create, assemble and start workers.
Workers are created of class *cls*, initialized with *args*, and given
task/result communication channels *input_tube* and *output_tubes*.
The number of workers created is according to *size* parameter.
*do_stop_task* indicates whether doTask() will be called for "stop" request.
### Response:
def assemble(
cls,
args,
input_tube,
output_tubes,
size,
disable_result,
do_stop_task,
):
"""Create, assemble and start workers.
Workers are created of class *cls*, initialized with *args*, and given
task/result communication channels *input_tube* and *output_tubes*.
The number of workers created is according to *size* parameter.
*do_stop_task* indicates whether doTask() will be called for "stop" request.
"""
# Create the workers.
workers = []
for ii in range(size):
worker = cls(**args)
worker.init2(
input_tube,
output_tubes,
size,
disable_result,
do_stop_task,
)
workers.append(worker)
# Start the workers.
for worker in workers:
worker.start() |
def _extract_links_from_lecture_assets(self, asset_ids):
"""
Extract links to files of the asset ids.
@param asset_ids: List of asset ids.
@type asset_ids: [str]
@return: @see CourseraOnDemand._extract_links_from_text
"""
links = {}
def _add_asset(name, url, destination):
filename, extension = os.path.splitext(clean_url(name))
            if extension == '':
return
extension = clean_filename(
extension.lower().strip('.').strip(),
self._unrestricted_filenames)
basename = clean_filename(
os.path.basename(filename),
self._unrestricted_filenames)
url = url.strip()
if extension not in destination:
destination[extension] = []
destination[extension].append((url, basename))
for asset_id in asset_ids:
for asset in self._get_asset_urls(asset_id):
_add_asset(asset['name'], asset['url'], links)
return links | Extract links to files of the asset ids.
@param asset_ids: List of asset ids.
@type asset_ids: [str]
    @return: @see CourseraOnDemand._extract_links_from_text | Below is the instruction that describes the task:
### Input:
Extract links to files of the asset ids.
@param asset_ids: List of asset ids.
@type asset_ids: [str]
@return: @see CourseraOnDemand._extract_links_from_text
### Response:
def _extract_links_from_lecture_assets(self, asset_ids):
"""
Extract links to files of the asset ids.
@param asset_ids: List of asset ids.
@type asset_ids: [str]
@return: @see CourseraOnDemand._extract_links_from_text
"""
links = {}
def _add_asset(name, url, destination):
filename, extension = os.path.splitext(clean_url(name))
            if extension == '':
return
extension = clean_filename(
extension.lower().strip('.').strip(),
self._unrestricted_filenames)
basename = clean_filename(
os.path.basename(filename),
self._unrestricted_filenames)
url = url.strip()
if extension not in destination:
destination[extension] = []
destination[extension].append((url, basename))
for asset_id in asset_ids:
for asset in self._get_asset_urls(asset_id):
_add_asset(asset['name'], asset['url'], links)
return links |
def iter_breadth_first(self, root=None):
""" Traverses the belief state's structure breadth-first """
        if root is None:
root = self
yield root
last = root
for node in self.iter_breadth_first(root):
if isinstance(node, DictCell):
# recurse
for subpart in node:
yield subpart
last = subpart
if last == node:
                return | Traverses the belief state's structure breadth-first | Below is the instruction that describes the task:
### Input:
Traverses the belief state's structure breadth-first
### Response:
def iter_breadth_first(self, root=None):
""" Traverses the belief state's structure breadth-first """
        if root is None:
root = self
yield root
last = root
for node in self.iter_breadth_first(root):
if isinstance(node, DictCell):
# recurse
for subpart in node:
yield subpart
last = subpart
if last == node:
return |
def get_form_kwargs(self):
"""
Build the keyword arguments required to instantiate the form.
"""
kwargs = {}
for key in six.iterkeys(self.form_classes):
if self.request.method in ('POST', 'PUT'):
kwargs[key] = {
'data': self.request.POST,
'files': self.request.FILES,
}
else:
kwargs[key] = {}
        return kwargs | Build the keyword arguments required to instantiate the form. | Below is the instruction that describes the task:
### Input:
Build the keyword arguments required to instantiate the form.
### Response:
def get_form_kwargs(self):
"""
Build the keyword arguments required to instantiate the form.
"""
kwargs = {}
for key in six.iterkeys(self.form_classes):
if self.request.method in ('POST', 'PUT'):
kwargs[key] = {
'data': self.request.POST,
'files': self.request.FILES,
}
else:
kwargs[key] = {}
return kwargs |
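A hedged sketch of how these per-form kwargs are typically consumed by a companion method on the same mixin; the get_forms name and the form_classes mapping are illustrative assumptions, not shown in the original code.

def get_forms(self):
    # Instantiate every form in form_classes with its own kwargs bundle.
    kwargs = self.get_form_kwargs()
    return {key: form_class(**kwargs[key])
            for key, form_class in self.form_classes.items()}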
def get_port_profile_for_intf_output_interface_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
output = ET.SubElement(get_port_profile_for_intf, "output")
interface = ET.SubElement(output, "interface")
interface_type = ET.SubElement(interface, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_port_profile_for_intf_output_interface_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
output = ET.SubElement(get_port_profile_for_intf, "output")
interface = ET.SubElement(output, "interface")
interface_type = ET.SubElement(interface, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def DateField(formatter=types.DEFAULT_DATE_FORMAT, default=NOTHING,
required=True, repr=True, cmp=True, key=None):
"""
Create new date field on a model.
:param formatter: date formatter string (default: "%Y-%m-%d")
:param default: any date or string that can be converted to a date value
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: include this field should appear in object's repr.
:param bool cmp: include this field in generated comparison.
:param string key: override name of the value when converted to dict.
"""
default = _init_fields.init_default(required, default, None)
validator = _init_fields.init_validator(required, date)
converter = converters.to_date_field(formatter)
return attrib(default=default, converter=converter, validator=validator,
repr=repr, cmp=cmp,
metadata=dict(formatter=formatter, key=key)) | Create new date field on a model.
:param formatter: date formatter string (default: "%Y-%m-%d")
:param default: any date or string that can be converted to a date value
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: include this field should appear in object's repr.
:param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict. | Below is the instruction that describes the task:
### Input:
Create new date field on a model.
:param formatter: date formatter string (default: "%Y-%m-%d")
:param default: any date or string that can be converted to a date value
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: include this field should appear in object's repr.
:param bool cmp: include this field in generated comparison.
:param string key: override name of the value when converted to dict.
### Response:
def DateField(formatter=types.DEFAULT_DATE_FORMAT, default=NOTHING,
required=True, repr=True, cmp=True, key=None):
"""
Create new date field on a model.
:param formatter: date formatter string (default: "%Y-%m-%d")
:param default: any date or string that can be converted to a date value
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: include this field should appear in object's repr.
:param bool cmp: include this field in generated comparison.
:param string key: override name of the value when converted to dict.
"""
default = _init_fields.init_default(required, default, None)
validator = _init_fields.init_validator(required, date)
converter = converters.to_date_field(formatter)
return attrib(default=default, converter=converter, validator=validator,
repr=repr, cmp=cmp,
metadata=dict(formatter=formatter, key=key)) |
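A short illustrative model using the field; it assumes the enclosing library follows the attrs pattern the attrib() call implies (plain attr.s is used here as a stand-in) and that the converter accepts date strings, so the Event class and its attribute are hypothetical.

import attr

@attr.s
class Event(object):
    when = DateField()  # parsed and validated as a date using the default "%Y-%m-%d"

event = Event(when='2021-03-01')
# event.when should now be a datetime.date instance: datetime.date(2021, 3, 1)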
def generate(self, save=True):
"""
Generates photo file in current format.
If ``save`` is ``True``, file is saved too.
"""
stretched_photo, crop_box = self._generate_img()
# set crop_box to (0,0,0,0) if photo not cropped
if not crop_box:
crop_box = 0, 0, 0, 0
self.crop_left, self.crop_top, right, bottom = crop_box
self.crop_width = right - self.crop_left
self.crop_height = bottom - self.crop_top
self.width, self.height = stretched_photo.size
f = StringIO()
imgf = (self.photo._get_image().format or
Image.EXTENSION[path.splitext(self.photo.image.name)[1]])
stretched_photo.save(f, format=imgf, quality=self.format.resample_quality)
f.seek(0)
self.image.save(self.file(), ContentFile(f.read()), save) | Generates photo file in current format.
    If ``save`` is ``True``, file is saved too. | Below is the instruction that describes the task:
### Input:
Generates photo file in current format.
If ``save`` is ``True``, file is saved too.
### Response:
def generate(self, save=True):
"""
Generates photo file in current format.
If ``save`` is ``True``, file is saved too.
"""
stretched_photo, crop_box = self._generate_img()
# set crop_box to (0,0,0,0) if photo not cropped
if not crop_box:
crop_box = 0, 0, 0, 0
self.crop_left, self.crop_top, right, bottom = crop_box
self.crop_width = right - self.crop_left
self.crop_height = bottom - self.crop_top
self.width, self.height = stretched_photo.size
f = StringIO()
imgf = (self.photo._get_image().format or
Image.EXTENSION[path.splitext(self.photo.image.name)[1]])
stretched_photo.save(f, format=imgf, quality=self.format.resample_quality)
f.seek(0)
self.image.save(self.file(), ContentFile(f.read()), save) |
def open_dataset(self, recent=None, debug_filename=None, bids=False):
"""Open a new dataset.
Parameters
----------
recent : path to file
one of the recent datasets to read
"""
if recent:
filename = recent
elif debug_filename is not None:
filename = debug_filename
else:
try:
dir_name = dirname(self.filename)
except (AttributeError, TypeError):
dir_name = self.parent.value('recording_dir')
file_or_dir = choose_file_or_dir()
if file_or_dir == 'dir':
filename = QFileDialog.getExistingDirectory(self,
'Open directory',
dir_name)
elif file_or_dir == 'file':
filename, _ = QFileDialog.getOpenFileName(self, 'Open file',
dir_name)
elif file_or_dir == 'abort':
return
if filename == '':
return
# clear previous dataset once the user opens another dataset
if self.dataset is not None:
self.parent.reset()
self.parent.statusBar().showMessage('Reading dataset: ' +
basename(filename))
lg.info('Reading dataset: ' + str(filename))
self.filename = filename # temp
self.dataset = Dataset(filename) #temp
#==============================================================================
# try:
# self.filename = filename
# self.dataset = Dataset(filename)
# except FileNotFoundError:
# msg = 'File ' + basename(filename) + ' cannot be read'
# self.parent.statusBar().showMessage(msg)
# lg.info(msg)
# error_dialog = QErrorMessage()
# error_dialog.setWindowTitle('Error opening dataset')
# error_dialog.showMessage(msg)
# if debug_filename is None:
# error_dialog.exec()
# return
#
# except BaseException as err:
# self.parent.statusBar().showMessage(str(err))
# lg.info('Error ' + str(err))
# error_dialog = QErrorMessage()
# error_dialog.setWindowTitle('Error opening dataset')
# error_dialog.showMessage(str(err))
# if debug_filename is None:
# error_dialog.exec()
# return
#==============================================================================
self.action['export'].setEnabled(True)
self.parent.statusBar().showMessage('')
self.parent.update() | Open a new dataset.
Parameters
----------
recent : path to file
        one of the recent datasets to read | Below is the instruction that describes the task:
### Input:
Open a new dataset.
Parameters
----------
recent : path to file
one of the recent datasets to read
### Response:
def open_dataset(self, recent=None, debug_filename=None, bids=False):
"""Open a new dataset.
Parameters
----------
recent : path to file
one of the recent datasets to read
"""
if recent:
filename = recent
elif debug_filename is not None:
filename = debug_filename
else:
try:
dir_name = dirname(self.filename)
except (AttributeError, TypeError):
dir_name = self.parent.value('recording_dir')
file_or_dir = choose_file_or_dir()
if file_or_dir == 'dir':
filename = QFileDialog.getExistingDirectory(self,
'Open directory',
dir_name)
elif file_or_dir == 'file':
filename, _ = QFileDialog.getOpenFileName(self, 'Open file',
dir_name)
elif file_or_dir == 'abort':
return
if filename == '':
return
# clear previous dataset once the user opens another dataset
if self.dataset is not None:
self.parent.reset()
self.parent.statusBar().showMessage('Reading dataset: ' +
basename(filename))
lg.info('Reading dataset: ' + str(filename))
self.filename = filename # temp
self.dataset = Dataset(filename) #temp
#==============================================================================
# try:
# self.filename = filename
# self.dataset = Dataset(filename)
# except FileNotFoundError:
# msg = 'File ' + basename(filename) + ' cannot be read'
# self.parent.statusBar().showMessage(msg)
# lg.info(msg)
# error_dialog = QErrorMessage()
# error_dialog.setWindowTitle('Error opening dataset')
# error_dialog.showMessage(msg)
# if debug_filename is None:
# error_dialog.exec()
# return
#
# except BaseException as err:
# self.parent.statusBar().showMessage(str(err))
# lg.info('Error ' + str(err))
# error_dialog = QErrorMessage()
# error_dialog.setWindowTitle('Error opening dataset')
# error_dialog.showMessage(str(err))
# if debug_filename is None:
# error_dialog.exec()
# return
#==============================================================================
self.action['export'].setEnabled(True)
self.parent.statusBar().showMessage('')
self.parent.update() |
def datasets_delete(self, dataset_name, delete_contents):
"""Issues a request to delete a dataset.
Args:
dataset_name: the name of the dataset to delete.
delete_contents: if True, any tables in the dataset will be deleted. If False and the
dataset is non-empty an exception will be raised.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
args = {}
if delete_contents:
args['deleteContents'] = True
return datalab.utils.Http.request(url, method='DELETE', args=args,
credentials=self._credentials, raw_response=True) | Issues a request to delete a dataset.
Args:
dataset_name: the name of the dataset to delete.
delete_contents: if True, any tables in the dataset will be deleted. If False and the
dataset is non-empty an exception will be raised.
Returns:
A parsed result object.
Raises:
      Exception if there is an error performing the operation. | Below is the instruction that describes the task:
### Input:
Issues a request to delete a dataset.
Args:
dataset_name: the name of the dataset to delete.
delete_contents: if True, any tables in the dataset will be deleted. If False and the
dataset is non-empty an exception will be raised.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
### Response:
def datasets_delete(self, dataset_name, delete_contents):
"""Issues a request to delete a dataset.
Args:
dataset_name: the name of the dataset to delete.
delete_contents: if True, any tables in the dataset will be deleted. If False and the
dataset is non-empty an exception will be raised.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
args = {}
if delete_contents:
args['deleteContents'] = True
return datalab.utils.Http.request(url, method='DELETE', args=args,
credentials=self._credentials, raw_response=True) |
def to_location(location):
"""Serializes location to string
:param location: object to serialize
:return: string
"""
if not location:
raise SbgError('Location is required!')
if isinstance(location, six.string_types):
return location
else:
raise SbgError('Invalid location parameter!') | Serializes location to string
:param location: object to serialize
    :return: string | Below is the instruction that describes the task:
### Input:
Serializes location to string
:param location: object to serialize
:return: string
### Response:
def to_location(location):
"""Serializes location to string
:param location: object to serialize
:return: string
"""
if not location:
raise SbgError('Location is required!')
if isinstance(location, six.string_types):
return location
else:
raise SbgError('Invalid location parameter!') |
def all(self, data={}, **kwargs):
""""
Fetch all Invoice entities
Returns:
Dictionary of Invoice data
"""
return super(Invoice, self).all(data, **kwargs) | Fetch all Invoice entities
Returns:
        Dictionary of Invoice data | Below is the instruction that describes the task:
### Input:
Fetch all Invoice entities
Returns:
Dictionary of Invoice data
### Response:
def all(self, data={}, **kwargs):
""""
Fetch all Invoice entities
Returns:
Dictionary of Invoice data
"""
return super(Invoice, self).all(data, **kwargs) |
def login(self, username, password, mode="demo"):
"""login function"""
url = "https://trading212.com/it/login"
try:
logger.debug(f"visiting %s" % url)
self.browser.visit(url)
logger.debug(f"connected to %s" % url)
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
raise CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
        return True | login function | Below is the instruction that describes the task:
### Input:
login function
### Response:
def login(self, username, password, mode="demo"):
"""login function"""
url = "https://trading212.com/it/login"
try:
logger.debug(f"visiting %s" % url)
self.browser.visit(url)
logger.debug(f"connected to %s" % url)
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
raise CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
return True |
def get_preference_type(self, type, user_id, address, notification):
"""
Get a preference.
Fetch the preference for the given notification for the given communicaiton channel
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# REQUIRED - PATH - type
"""ID"""
path["type"] = type
# REQUIRED - PATH - address
"""ID"""
path["address"] = address
# REQUIRED - PATH - notification
"""ID"""
path["notification"] = notification
self.logger.debug("GET /api/v1/users/{user_id}/communication_channels/{type}/{address}/notification_preferences/{notification} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/users/{user_id}/communication_channels/{type}/{address}/notification_preferences/{notification}".format(**path), data=data, params=params, single_item=True) | Get a preference.
        Fetch the preference for the given notification for the given communication channel | Below is the instruction that describes the task:
### Input:
Get a preference.
    Fetch the preference for the given notification for the given communication channel
### Response:
def get_preference_type(self, type, user_id, address, notification):
"""
Get a preference.
        Fetch the preference for the given notification for the given communication channel
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# REQUIRED - PATH - type
"""ID"""
path["type"] = type
# REQUIRED - PATH - address
"""ID"""
path["address"] = address
# REQUIRED - PATH - notification
"""ID"""
path["notification"] = notification
self.logger.debug("GET /api/v1/users/{user_id}/communication_channels/{type}/{address}/notification_preferences/{notification} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/users/{user_id}/communication_channels/{type}/{address}/notification_preferences/{notification}".format(**path), data=data, params=params, single_item=True) |
def do_rmpostfix(self, arg):
"""Removes a postfix function from a variable. See 'postfix'."""
altered = False
if arg in self.curargs["functions"]:
del self.curargs["functions"][arg]
altered = True
elif arg == "*":
for varname in list(self.curargs["functions"].keys()):
del self.curargs["functions"][varname]
altered = True
if altered:
            self.do_postfix("list") | Removes a postfix function from a variable. See 'postfix'. | Below is the instruction that describes the task:
### Input:
Removes a postfix function from a variable. See 'postfix'.
### Response:
def do_rmpostfix(self, arg):
"""Removes a postfix function from a variable. See 'postfix'."""
altered = False
if arg in self.curargs["functions"]:
del self.curargs["functions"][arg]
altered = True
elif arg == "*":
for varname in list(self.curargs["functions"].keys()):
del self.curargs["functions"][varname]
altered = True
if altered:
self.do_postfix("list") |
def setExtraSelections(self, selections):
"""Set list of extra selections.
Selections are list of tuples ``(startAbsolutePosition, length)``.
Extra selections are reset on any text modification.
        This is a reimplemented method of QPlainTextEdit; it has a different signature. Do not use the QPlainTextEdit method.
"""
def _makeQtExtraSelection(startAbsolutePosition, length):
selection = QTextEdit.ExtraSelection()
cursor = QTextCursor(self.document())
cursor.setPosition(startAbsolutePosition)
cursor.setPosition(startAbsolutePosition + length, QTextCursor.KeepAnchor)
selection.cursor = cursor
selection.format = self._userExtraSelectionFormat
return selection
self._userExtraSelections = [_makeQtExtraSelection(*item) for item in selections]
self._updateExtraSelections() | Set list of extra selections.
Selections are list of tuples ``(startAbsolutePosition, length)``.
Extra selections are reset on any text modification.
    This is a reimplemented method of QPlainTextEdit; it has a different signature. Do not use the QPlainTextEdit method. | Below is the instruction that describes the task:
### Input:
Set list of extra selections.
Selections are list of tuples ``(startAbsolutePosition, length)``.
Extra selections are reset on any text modification.
    This is a reimplemented method of QPlainTextEdit; it has a different signature. Do not use the QPlainTextEdit method.
### Response:
def setExtraSelections(self, selections):
"""Set list of extra selections.
Selections are list of tuples ``(startAbsolutePosition, length)``.
Extra selections are reset on any text modification.
        This is a reimplemented method of QPlainTextEdit; it has a different signature. Do not use the QPlainTextEdit method.
"""
def _makeQtExtraSelection(startAbsolutePosition, length):
selection = QTextEdit.ExtraSelection()
cursor = QTextCursor(self.document())
cursor.setPosition(startAbsolutePosition)
cursor.setPosition(startAbsolutePosition + length, QTextCursor.KeepAnchor)
selection.cursor = cursor
selection.format = self._userExtraSelectionFormat
return selection
self._userExtraSelections = [_makeQtExtraSelection(*item) for item in selections]
self._updateExtraSelections() |
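A small usage sketch; editor stands in for any instance of the widget class that defines this method, and the offsets are arbitrary.

# Highlight 5 characters starting at absolute position 0
# and 3 characters starting at absolute position 42.
editor.setExtraSelections([(0, 5), (42, 3)])
# Any modification of the text clears these selections again.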
def setFilenames( self, filenames ):
"""
Sets the list of filenames that will be used for this menu to the \
        inputted list.
:param filenames | [<str>, ..]
"""
mapped = []
for filename in filenames:
filename = nativestring(filename)
if ( not filename ):
continue
mapped.append(filename)
if ( len(mapped) == self.maximumLength() ):
break
self._filenames = mapped
self.refresh() | Sets the list of filenames that will be used for this menu to the \
inputted list.
:param filenames | [<str>, ..] | Below is the the instruction that describes the task:
### Input:
Sets the list of filenames that will be used for this menu to the \
inputted list.
:param filenames | [<str>, ..]
### Response:
def setFilenames( self, filenames ):
"""
Sets the list of filenames that will be used for this menu to the \
inputted list.
:param filenames | [<str>, ..]
"""
mapped = []
for filename in filenames:
filename = nativestring(filename)
if ( not filename ):
continue
mapped.append(filename)
if ( len(mapped) == self.maximumLength() ):
break
self._filenames = mapped
self.refresh() |
def _assign_database_backend(self, db):
"""Assign Trace instance to stochastics and deterministics and Database instance
to self.
:Parameters:
- `db` : string, Database instance
The name of the database module (see below), or a Database instance.
Available databases:
- `no_trace` : Traces are not stored at all.
- `ram` : Traces stored in memory.
- `txt` : Traces stored in memory and saved in txt files at end of
sampling.
- `sqlite` : Traces stored in sqlite database.
- `hdf5` : Traces stored in an HDF5 file.
"""
# Objects that are not to be tallied are assigned a no_trace.Trace
# Tallyable objects are listed in the _nodes_to_tally set.
no_trace = getattr(database, 'no_trace')
self._variables_to_tally = set()
for object in self.stochastics | self.deterministics:
if object.keep_trace:
self._variables_to_tally.add(object)
try:
if object.mask is None:
# Standard stochastic
self._funs_to_tally[object.__name__] = object.get_value
else:
# Has missing values, so only fetch stochastic elements
# using mask
self._funs_to_tally[
object.__name__] = object.get_stoch_value
except AttributeError:
# Not a stochastic object, so no mask
self._funs_to_tally[object.__name__] = object.get_value
else:
object.trace = no_trace.Trace(object.__name__)
check_valid_object_name(self._variables_to_tally)
# If not already done, load the trace backend from the database
# module, and assign a database instance to Model.
if isinstance(db, str):
if db in dir(database):
module = getattr(database, db)
# Assign a default name for the database output file.
if self._db_args.get('dbname') is None:
self._db_args['dbname'] = self.__name__ + '.' + db
self.db = module.Database(**self._db_args)
elif db in database.__modules__:
raise ImportError(
'Database backend `%s` is not properly installed. Please see the documentation for instructions.' % db)
else:
raise AttributeError(
'Database backend `%s` is not defined in pymc.database.' % db)
elif isinstance(db, database.base.Database):
self.db = db
self.restore_sampler_state()
else: # What is this for? DH.
self.db = db.Database(**self._db_args) | Assign Trace instance to stochastics and deterministics and Database instance
to self.
:Parameters:
- `db` : string, Database instance
The name of the database module (see below), or a Database instance.
Available databases:
- `no_trace` : Traces are not stored at all.
- `ram` : Traces stored in memory.
- `txt` : Traces stored in memory and saved in txt files at end of
sampling.
- `sqlite` : Traces stored in sqlite database.
- `hdf5` : Traces stored in an HDF5 file. | Below is the the instruction that describes the task:
### Input:
Assign Trace instance to stochastics and deterministics and Database instance
to self.
:Parameters:
- `db` : string, Database instance
The name of the database module (see below), or a Database instance.
Available databases:
- `no_trace` : Traces are not stored at all.
- `ram` : Traces stored in memory.
- `txt` : Traces stored in memory and saved in txt files at end of
sampling.
- `sqlite` : Traces stored in sqlite database.
- `hdf5` : Traces stored in an HDF5 file.
### Response:
def _assign_database_backend(self, db):
"""Assign Trace instance to stochastics and deterministics and Database instance
to self.
:Parameters:
- `db` : string, Database instance
The name of the database module (see below), or a Database instance.
Available databases:
- `no_trace` : Traces are not stored at all.
- `ram` : Traces stored in memory.
- `txt` : Traces stored in memory and saved in txt files at end of
sampling.
- `sqlite` : Traces stored in sqlite database.
- `hdf5` : Traces stored in an HDF5 file.
"""
# Objects that are not to be tallied are assigned a no_trace.Trace
# Tallyable objects are listed in the _nodes_to_tally set.
no_trace = getattr(database, 'no_trace')
self._variables_to_tally = set()
for object in self.stochastics | self.deterministics:
if object.keep_trace:
self._variables_to_tally.add(object)
try:
if object.mask is None:
# Standard stochastic
self._funs_to_tally[object.__name__] = object.get_value
else:
# Has missing values, so only fetch stochastic elements
# using mask
self._funs_to_tally[
object.__name__] = object.get_stoch_value
except AttributeError:
# Not a stochastic object, so no mask
self._funs_to_tally[object.__name__] = object.get_value
else:
object.trace = no_trace.Trace(object.__name__)
check_valid_object_name(self._variables_to_tally)
# If not already done, load the trace backend from the database
# module, and assign a database instance to Model.
if isinstance(db, str):
if db in dir(database):
module = getattr(database, db)
# Assign a default name for the database output file.
if self._db_args.get('dbname') is None:
self._db_args['dbname'] = self.__name__ + '.' + db
self.db = module.Database(**self._db_args)
elif db in database.__modules__:
raise ImportError(
'Database backend `%s` is not properly installed. Please see the documentation for instructions.' % db)
else:
raise AttributeError(
'Database backend `%s` is not defined in pymc.database.' % db)
elif isinstance(db, database.base.Database):
self.db = db
self.restore_sampler_state()
else: # What is this for? DH.
self.db = db.Database(**self._db_args) |
def set_rotation_rate(self, val):
"""value ca be between 0x00 and 0xFF:
value is a multiplied with 0.784 degrees/s except for:
0 --> 1 degrees/s
255 --> jumps to 400 degrees/s
"""
return self.write(request.SetRotationRate(self.seq, val)) | value can be between 0x00 and 0xFF:
value is multiplied by 0.784 degrees/s except for:
0 --> 1 degrees/s
255 --> jumps to 400 degrees/s | Below is the the instruction that describes the task:
### Input:
value can be between 0x00 and 0xFF:
value is multiplied by 0.784 degrees/s except for:
0 --> 1 degrees/s
255 --> jumps to 400 degrees/s
### Response:
def set_rotation_rate(self, val):
"""value ca be between 0x00 and 0xFF:
value is a multiplied with 0.784 degrees/s except for:
0 --> 1 degrees/s
255 --> jumps to 400 degrees/s
"""
return self.write(request.SetRotationRate(self.seq, val)) |
def graph_nodes_sorted(self):
""" Returns an (ascending) sorted list of graph's nodes (name is used as key).
Returns
-------
:any:`list`
Description #TODO check
"""
return sorted(self._graph.nodes(), key=lambda _: repr(_)) | Returns an (ascending) sorted list of graph's nodes (name is used as key).
Returns
-------
:any:`list`
Description #TODO check | Below is the the instruction that describes the task:
### Input:
Returns an (ascending) sorted list of graph's nodes (name is used as key).
Returns
-------
:any:`list`
Description #TODO check
### Response:
def graph_nodes_sorted(self):
""" Returns an (ascending) sorted list of graph's nodes (name is used as key).
Returns
-------
:any:`list`
Description #TODO check
"""
return sorted(self._graph.nodes(), key=lambda _: repr(_)) |
def course_is_open_to_user(self, course, username=None, lti=None):
"""
Checks if a user can access a course
:param course: a Course object
:param username: The username of the user that we want to check. If None, uses self.session_username()
:param lti: indicates if the user is currently in a LTI session or not.
- None to ignore the check
- True to indicate the user is in a LTI session
- False to indicate the user is not in a LTI session
- "auto" to enable the check and take the information from the current session
:return: True if the user can access the course, False else
"""
if username is None:
username = self.session_username()
if lti == "auto":
lti = self.session_lti_info() is not None
if self.has_staff_rights_on_course(course, username):
return True
if not course.get_accessibility().is_open() or (not self.course_is_user_registered(course, username) and not course.allow_preview()):
return False
if lti and course.is_lti() != lti:
return False
if lti is False and course.is_lti():
return not course.lti_send_back_grade()
return True | Checks if a user can access a course
:param course: a Course object
:param username: The username of the user that we want to check. If None, uses self.session_username()
:param lti: indicates if the user is currently in a LTI session or not.
- None to ignore the check
- True to indicate the user is in a LTI session
- False to indicate the user is not in a LTI session
- "auto" to enable the check and take the information from the current session
:return: True if the user can access the course, False else | Below is the the instruction that describes the task:
### Input:
Checks if a user can access a course
:param course: a Course object
:param username: The username of the user that we want to check. If None, uses self.session_username()
:param lti: indicates if the user is currently in a LTI session or not.
- None to ignore the check
- True to indicate the user is in a LTI session
- False to indicate the user is not in a LTI session
- "auto" to enable the check and take the information from the current session
:return: True if the user can access the course, False else
### Response:
def course_is_open_to_user(self, course, username=None, lti=None):
"""
Checks if a user can access a course
:param course: a Course object
:param username: The username of the user that we want to check. If None, uses self.session_username()
:param lti: indicates if the user is currently in a LTI session or not.
- None to ignore the check
- True to indicate the user is in a LTI session
- False to indicate the user is not in a LTI session
- "auto" to enable the check and take the information from the current session
:return: True if the user can access the course, False else
"""
if username is None:
username = self.session_username()
if lti == "auto":
lti = self.session_lti_info() is not None
if self.has_staff_rights_on_course(course, username):
return True
if not course.get_accessibility().is_open() or (not self.course_is_user_registered(course, username) and not course.allow_preview()):
return False
if lti and course.is_lti() != lti:
return False
if lti is False and course.is_lti():
return not course.lti_send_back_grade()
return True |
def editex(src, tar, cost=(0, 1, 2), local=False):
"""Return the Editex distance between two strings.
This is a wrapper for :py:meth:`Editex.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
cost : tuple
A 3-tuple representing the cost of the four possible edits: match,
same-group, and mismatch respectively (by default: (0, 1, 2))
local : bool
If True, the local variant of Editex is used
Returns
-------
int
Editex distance
Examples
--------
>>> editex('cat', 'hat')
2
>>> editex('Niall', 'Neil')
2
>>> editex('aluminum', 'Catalan')
12
>>> editex('ATCG', 'TAGC')
6
"""
return Editex().dist_abs(src, tar, cost, local) | Return the Editex distance between two strings.
This is a wrapper for :py:meth:`Editex.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
cost : tuple
A 3-tuple representing the cost of the four possible edits: match,
same-group, and mismatch respectively (by default: (0, 1, 2))
local : bool
If True, the local variant of Editex is used
Returns
-------
int
Editex distance
Examples
--------
>>> editex('cat', 'hat')
2
>>> editex('Niall', 'Neil')
2
>>> editex('aluminum', 'Catalan')
12
>>> editex('ATCG', 'TAGC')
6 | Below is the the instruction that describes the task:
### Input:
Return the Editex distance between two strings.
This is a wrapper for :py:meth:`Editex.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
cost : tuple
A 3-tuple representing the cost of the four possible edits: match,
same-group, and mismatch respectively (by default: (0, 1, 2))
local : bool
If True, the local variant of Editex is used
Returns
-------
int
Editex distance
Examples
--------
>>> editex('cat', 'hat')
2
>>> editex('Niall', 'Neil')
2
>>> editex('aluminum', 'Catalan')
12
>>> editex('ATCG', 'TAGC')
6
### Response:
def editex(src, tar, cost=(0, 1, 2), local=False):
"""Return the Editex distance between two strings.
This is a wrapper for :py:meth:`Editex.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
cost : tuple
A 3-tuple representing the cost of the four possible edits: match,
same-group, and mismatch respectively (by default: (0, 1, 2))
local : bool
If True, the local variant of Editex is used
Returns
-------
int
Editex distance
Examples
--------
>>> editex('cat', 'hat')
2
>>> editex('Niall', 'Neil')
2
>>> editex('aluminum', 'Catalan')
12
>>> editex('ATCG', 'TAGC')
6
"""
return Editex().dist_abs(src, tar, cost, local) |
def point_seg_sep(ar, br1, br2):
"""Return the minimum separation vector between a point and a line segment,
in 3 dimensions.
Parameters
----------
ar: array-like, shape (3,)
Coordinates of a point.
br1, br2: array-like, shape (3,)
Coordinates for the points of a line segment
Returns
-------
sep: float array, shape (3,)
Separation vector between point and line segment.
"""
v = br2 - br1
w = ar - br1
c1 = np.dot(w, v)
if c1 <= 0.0:
return ar - br1
c2 = np.sum(np.square(v))
if c2 <= c1:
return ar - br2
b = c1 / c2
bc = br1 + b * v
return ar - bc | Return the minimum separation vector between a point and a line segment,
in 3 dimensions.
Parameters
----------
ar: array-like, shape (3,)
Coordinates of a point.
br1, br2: array-like, shape (3,)
Coordinates for the points of a line segment
Returns
-------
sep: float array, shape (3,)
Separation vector between point and line segment. | Below is the the instruction that describes the task:
### Input:
Return the minimum separation vector between a point and a line segment,
in 3 dimensions.
Parameters
----------
ar: array-like, shape (3,)
Coordinates of a point.
br1, br2: array-like, shape (3,)
Coordinates for the points of a line segment
Returns
-------
sep: float array, shape (3,)
Separation vector between point and line segment.
### Response:
def point_seg_sep(ar, br1, br2):
"""Return the minimum separation vector between a point and a line segment,
in 3 dimensions.
Parameters
----------
ar: array-like, shape (3,)
Coordinates of a point.
br1, br2: array-like, shape (3,)
Coordinates for the points of a line segment
Returns
-------
sep: float array, shape (3,)
Separation vector between point and line segment.
"""
v = br2 - br1
w = ar - br1
c1 = np.dot(w, v)
if c1 <= 0.0:
return ar - br1
c2 = np.sum(np.square(v))
if c2 <= c1:
return ar - br2
b = c1 / c2
bc = br1 + b * v
return ar - bc |
async def main():
"""The main part of the example script."""
async with aiohttp.ClientSession() as session:
data = Glances(loop, session, version=VERSION)
# Get the metrics for the memory
await data.get_metrics('mem')
# Print the values
print("Memory values:", data.values)
# Get the metrics about the disks
await data.get_metrics('diskio')
# Print the values
print("Disk values:", data.values) | The main part of the example script. | Below is the the instruction that describes the task:
### Input:
The main part of the example script.
### Response:
async def main():
"""The main part of the example script."""
async with aiohttp.ClientSession() as session:
data = Glances(loop, session, version=VERSION)
# Get the metrics for the memory
await data.get_metrics('mem')
# Print the values
print("Memory values:", data.values)
# Get the metrics about the disks
await data.get_metrics('diskio')
# Print the values
print("Disk values:", data.values) |
def data(self, index, role=Qt.DisplayRole):
"""Cell content"""
if not index.isValid():
return to_qvariant()
value = self.get_value(index)
if index.column() == 3 and self.remote:
value = value['view']
if index.column() == 3:
display = value_to_display(value, minmax=self.minmax)
else:
if is_type_text_string(value):
display = to_text_string(value, encoding="utf-8")
else:
display = to_text_string(value)
if role == Qt.DisplayRole:
return to_qvariant(display)
elif role == Qt.EditRole:
return to_qvariant(value_to_display(value))
elif role == Qt.TextAlignmentRole:
if index.column() == 3:
if len(display.splitlines()) < 3:
return to_qvariant(int(Qt.AlignLeft|Qt.AlignVCenter))
else:
return to_qvariant(int(Qt.AlignLeft|Qt.AlignTop))
else:
return to_qvariant(int(Qt.AlignLeft|Qt.AlignVCenter))
elif role == Qt.BackgroundColorRole:
return to_qvariant( self.get_bgcolor(index) )
elif role == Qt.FontRole:
return to_qvariant(get_font(font_size_delta=DEFAULT_SMALL_DELTA))
return to_qvariant() | Cell content | Below is the the instruction that describes the task:
### Input:
Cell content
### Response:
def data(self, index, role=Qt.DisplayRole):
"""Cell content"""
if not index.isValid():
return to_qvariant()
value = self.get_value(index)
if index.column() == 3 and self.remote:
value = value['view']
if index.column() == 3:
display = value_to_display(value, minmax=self.minmax)
else:
if is_type_text_string(value):
display = to_text_string(value, encoding="utf-8")
else:
display = to_text_string(value)
if role == Qt.DisplayRole:
return to_qvariant(display)
elif role == Qt.EditRole:
return to_qvariant(value_to_display(value))
elif role == Qt.TextAlignmentRole:
if index.column() == 3:
if len(display.splitlines()) < 3:
return to_qvariant(int(Qt.AlignLeft|Qt.AlignVCenter))
else:
return to_qvariant(int(Qt.AlignLeft|Qt.AlignTop))
else:
return to_qvariant(int(Qt.AlignLeft|Qt.AlignVCenter))
elif role == Qt.BackgroundColorRole:
return to_qvariant( self.get_bgcolor(index) )
elif role == Qt.FontRole:
return to_qvariant(get_font(font_size_delta=DEFAULT_SMALL_DELTA))
return to_qvariant() |
def get_state_paths(cls, impl, working_dir):
"""
Get the set of state paths that point to the current chain and state info.
Returns a list of paths.
"""
return [config.get_db_filename(impl, working_dir), config.get_snapshots_filename(impl, working_dir)] | Get the set of state paths that point to the current chain and state info.
Returns a list of paths. | Below is the the instruction that describes the task:
### Input:
Get the set of state paths that point to the current chain and state info.
Returns a list of paths.
### Response:
def get_state_paths(cls, impl, working_dir):
"""
Get the set of state paths that point to the current chain and state info.
Returns a list of paths.
"""
return [config.get_db_filename(impl, working_dir), config.get_snapshots_filename(impl, working_dir)] |
def redirect_std():
"""
Connect stdin/stdout to controlling terminal even if the script's input and output
were redirected. This is useful in utilities based on termenu.
"""
stdin = sys.stdin
stdout = sys.stdout
if not sys.stdin.isatty():
sys.stdin = open_raw("/dev/tty", "r", 0)
if not sys.stdout.isatty():
sys.stdout = open_raw("/dev/tty", "w", 0)
return stdin, stdout | Connect stdin/stdout to controlling terminal even if the script's input and output
were redirected. This is useful in utilities based on termenu. | Below is the the instruction that describes the task:
### Input:
Connect stdin/stdout to controlling terminal even if the script's input and output
were redirected. This is useful in utilities based on termenu.
### Response:
def redirect_std():
"""
Connect stdin/stdout to controlling terminal even if the script's input and output
were redirected. This is useful in utilities based on termenu.
"""
stdin = sys.stdin
stdout = sys.stdout
if not sys.stdin.isatty():
sys.stdin = open_raw("/dev/tty", "r", 0)
if not sys.stdout.isatty():
sys.stdout = open_raw("/dev/tty", "w", 0)
return stdin, stdout |
def update_user_type(self):
"""Return either 'tutor' or 'student' based on which radio
button is selected.
"""
if self.rb_tutor.isChecked():
self.user_type = 'tutor'
elif self.rb_student.isChecked():
self.user_type = 'student'
self.accept() | Return either 'tutor' or 'student' based on which radio
button is selected. | Below is the the instruction that describes the task:
### Input:
Return either 'tutor' or 'student' based on which radio
button is selected.
### Response:
def update_user_type(self):
"""Return either 'tutor' or 'student' based on which radio
button is selected.
"""
if self.rb_tutor.isChecked():
self.user_type = 'tutor'
elif self.rb_student.isChecked():
self.user_type = 'student'
self.accept() |
def disconnect():
""" disconnect signals """
post_save.disconnect(node_created_handler, sender=Node)
node_status_changed.disconnect(node_status_changed_handler)
pre_delete.disconnect(node_deleted_handler, sender=Node) | disconnect signals | Below is the the instruction that describes the task:
### Input:
disconnect signals
### Response:
def disconnect():
""" disconnect signals """
post_save.disconnect(node_created_handler, sender=Node)
node_status_changed.disconnect(node_status_changed_handler)
pre_delete.disconnect(node_deleted_handler, sender=Node) |
async def send_animation(self,
animation: typing.Union[base.InputFile, base.String],
duration: typing.Union[base.Integer, None] = None,
width: typing.Union[base.Integer, None] = None,
height: typing.Union[base.Integer, None] = None,
thumb: typing.Union[typing.Union[base.InputFile, base.String], None] = None,
caption: typing.Union[base.String, None] = None,
parse_mode: typing.Union[base.String, None] = None,
disable_notification: typing.Union[base.Boolean, None] = None,
reply_markup=None,
reply=True) -> Message:
"""
Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound).
On success, the sent Message is returned.
Bots can currently send animation files of up to 50 MB in size, this limit may be changed in the future.
Source https://core.telegram.org/bots/api#sendanimation
:param animation: Animation to send. Pass a file_id as String to send an animation that exists
on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an animation
from the Internet, or upload a new animation using multipart/form-data
:type animation: :obj:`typing.Union[base.InputFile, base.String]`
:param duration: Duration of sent animation in seconds
:type duration: :obj:`typing.Union[base.Integer, None]`
:param width: Animation width
:type width: :obj:`typing.Union[base.Integer, None]`
:param height: Animation height
:type height: :obj:`typing.Union[base.Integer, None]`
:param thumb: Thumbnail of the file sent. The thumbnail should be in JPEG format and less than 200 kB in size.
A thumbnail‘s width and height should not exceed 90.
:type thumb: :obj:`typing.Union[typing.Union[base.InputFile, base.String], None]`
:param caption: Animation caption (may also be used when resending animation by file_id), 0-1024 characters
:type caption: :obj:`typing.Union[base.String, None]`
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic,
fixed-width text or inline URLs in the media caption
:type parse_mode: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard,
custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user
:type reply_markup: :obj:`typing.Union[typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup,
types.ReplyKeyboardRemove, types.ForceReply], None]`
:param reply: fill 'reply_to_message_id'
:return: On success, the sent Message is returned
:rtype: :obj:`types.Message`
"""
warn_deprecated('"Message.send_animation" method will be removed in 2.2 version.\n'
'Use "Message.reply_animation" instead.',
stacklevel=8)
return await self.bot.send_animation(self.chat.id, animation=animation,
duration=duration,
width=width,
height=height,
thumb=thumb,
caption=caption,
parse_mode=parse_mode,
disable_notification=disable_notification,
reply_to_message_id=self.message_id if reply else None,
reply_markup=reply_markup
) | Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound).
On success, the sent Message is returned.
Bots can currently send animation files of up to 50 MB in size, this limit may be changed in the future.
Source https://core.telegram.org/bots/api#sendanimation
:param animation: Animation to send. Pass a file_id as String to send an animation that exists
on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an animation
from the Internet, or upload a new animation using multipart/form-data
:type animation: :obj:`typing.Union[base.InputFile, base.String]`
:param duration: Duration of sent animation in seconds
:type duration: :obj:`typing.Union[base.Integer, None]`
:param width: Animation width
:type width: :obj:`typing.Union[base.Integer, None]`
:param height: Animation height
:type height: :obj:`typing.Union[base.Integer, None]`
:param thumb: Thumbnail of the file sent. The thumbnail should be in JPEG format and less than 200 kB in size.
A thumbnail‘s width and height should not exceed 90.
:type thumb: :obj:`typing.Union[typing.Union[base.InputFile, base.String], None]`
:param caption: Animation caption (may also be used when resending animation by file_id), 0-1024 characters
:type caption: :obj:`typing.Union[base.String, None]`
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic,
fixed-width text or inline URLs in the media caption
:type parse_mode: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard,
custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user
:type reply_markup: :obj:`typing.Union[typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup,
types.ReplyKeyboardRemove, types.ForceReply], None]`
:param reply: fill 'reply_to_message_id'
:return: On success, the sent Message is returned
:rtype: :obj:`types.Message` | Below is the the instruction that describes the task:
### Input:
Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound).
On success, the sent Message is returned.
Bots can currently send animation files of up to 50 MB in size, this limit may be changed in the future.
Source https://core.telegram.org/bots/api#sendanimation
:param animation: Animation to send. Pass a file_id as String to send an animation that exists
on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an animation
from the Internet, or upload a new animation using multipart/form-data
:type animation: :obj:`typing.Union[base.InputFile, base.String]`
:param duration: Duration of sent animation in seconds
:type duration: :obj:`typing.Union[base.Integer, None]`
:param width: Animation width
:type width: :obj:`typing.Union[base.Integer, None]`
:param height: Animation height
:type height: :obj:`typing.Union[base.Integer, None]`
:param thumb: Thumbnail of the file sent. The thumbnail should be in JPEG format and less than 200 kB in size.
A thumbnail‘s width and height should not exceed 90.
:type thumb: :obj:`typing.Union[typing.Union[base.InputFile, base.String], None]`
:param caption: Animation caption (may also be used when resending animation by file_id), 0-1024 characters
:type caption: :obj:`typing.Union[base.String, None]`
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic,
fixed-width text or inline URLs in the media caption
:type parse_mode: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard,
custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user
:type reply_markup: :obj:`typing.Union[typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup,
types.ReplyKeyboardRemove, types.ForceReply], None]`
:param reply: fill 'reply_to_message_id'
:return: On success, the sent Message is returned
:rtype: :obj:`types.Message`
### Response:
async def send_animation(self,
animation: typing.Union[base.InputFile, base.String],
duration: typing.Union[base.Integer, None] = None,
width: typing.Union[base.Integer, None] = None,
height: typing.Union[base.Integer, None] = None,
thumb: typing.Union[typing.Union[base.InputFile, base.String], None] = None,
caption: typing.Union[base.String, None] = None,
parse_mode: typing.Union[base.String, None] = None,
disable_notification: typing.Union[base.Boolean, None] = None,
reply_markup=None,
reply=True) -> Message:
"""
Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound).
On success, the sent Message is returned.
Bots can currently send animation files of up to 50 MB in size, this limit may be changed in the future.
Source https://core.telegram.org/bots/api#sendanimation
:param animation: Animation to send. Pass a file_id as String to send an animation that exists
on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an animation
from the Internet, or upload a new animation using multipart/form-data
:type animation: :obj:`typing.Union[base.InputFile, base.String]`
:param duration: Duration of sent animation in seconds
:type duration: :obj:`typing.Union[base.Integer, None]`
:param width: Animation width
:type width: :obj:`typing.Union[base.Integer, None]`
:param height: Animation height
:type height: :obj:`typing.Union[base.Integer, None]`
:param thumb: Thumbnail of the file sent. The thumbnail should be in JPEG format and less than 200 kB in size.
A thumbnail‘s width and height should not exceed 90.
:type thumb: :obj:`typing.Union[typing.Union[base.InputFile, base.String], None]`
:param caption: Animation caption (may also be used when resending animation by file_id), 0-1024 characters
:type caption: :obj:`typing.Union[base.String, None]`
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic,
fixed-width text or inline URLs in the media caption
:type parse_mode: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard,
custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user
:type reply_markup: :obj:`typing.Union[typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup,
types.ReplyKeyboardRemove, types.ForceReply], None]`
:param reply: fill 'reply_to_message_id'
:return: On success, the sent Message is returned
:rtype: :obj:`types.Message`
"""
warn_deprecated('"Message.send_animation" method will be removed in 2.2 version.\n'
'Use "Message.reply_animation" instead.',
stacklevel=8)
return await self.bot.send_animation(self.chat.id, animation=animation,
duration=duration,
width=width,
height=height,
thumb=thumb,
caption=caption,
parse_mode=parse_mode,
disable_notification=disable_notification,
reply_to_message_id=self.message_id if reply else None,
reply_markup=reply_markup
) |
def reset_stats_history(self):
"""Reset the stats history (dict of GlancesAttribute)."""
if self.history_enable():
reset_list = [a['name'] for a in self.get_items_history_list()]
logger.debug("Reset history for plugin {} (items: {})".format(self.plugin_name, reset_list))
self.stats_history.reset() | Reset the stats history (dict of GlancesAttribute). | Below is the the instruction that describes the task:
### Input:
Reset the stats history (dict of GlancesAttribute).
### Response:
def reset_stats_history(self):
"""Reset the stats history (dict of GlancesAttribute)."""
if self.history_enable():
reset_list = [a['name'] for a in self.get_items_history_list()]
logger.debug("Reset history for plugin {} (items: {})".format(self.plugin_name, reset_list))
self.stats_history.reset() |
def sg_lookup(tensor, opt):
r"""Looks up the `tensor`, which is the embedding matrix.
Args:
tensor: A tensor ( automatically given by chain )
opt:
emb: A 2-D `Tensor`. An embedding matrix.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
assert opt.emb is not None, 'emb is mandatory.'
return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name) | r"""Looks up the `tensor`, which is the embedding matrix.
Args:
tensor: A tensor ( automatically given by chain )
opt:
emb: A 2-D `Tensor`. An embedding matrix.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`. | Below is the the instruction that describes the task:
### Input:
r"""Looks up the `tensor`, which is the embedding matrix.
Args:
tensor: A tensor ( automatically given by chain )
opt:
emb: A 2-D `Tensor`. An embedding matrix.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
### Response:
def sg_lookup(tensor, opt):
r"""Looks up the `tensor`, which is the embedding matrix.
Args:
tensor: A tensor ( automatically given by chain )
opt:
emb: A 2-D `Tensor`. An embedding matrix.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
assert opt.emb is not None, 'emb is mandatory.'
return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name) |
def vinet_k_num(v, v0, k0, k0p, precision=1.e-5):
"""
calculate bulk modulus numerically from volume, not pressure
according to test this differs from analytical result by 1.e-5
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param precision: precision for numerical calc (default = 1.e-5 * v0)
:return: dP/dV
"""
return -1. * v * vinet_dPdV(v, v0, k0, k0p, precision=precision) | calculate bulk modulus numerically from volume, not pressure
according to test this differs from analytical result by 1.e-5
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param precision: precision for numerical calc (default = 1.e-5 * v0)
:return: dP/dV | Below is the the instruction that describes the task:
### Input:
calculate bulk modulus numerically from volume, not pressure
according to test this differs from analytical result by 1.e-5
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param precision: precision for numerical calc (default = 1.e-5 * v0)
:return: dP/dV
### Response:
def vinet_k_num(v, v0, k0, k0p, precision=1.e-5):
"""
calculate bulk modulus numerically from volume, not pressure
according to test this differs from analytical result by 1.e-5
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param precision: precision for numerical calc (default = 1.e-5 * v0)
:return: dP/dV
"""
return -1. * v * vinet_dPdV(v, v0, k0, k0p, precision=precision) |
def get_child_ids(self, parent_alias):
"""Returns child IDs of the given parent category
:param str parent_alias: Parent category alias
:rtype: list
:return: a list of child IDs
"""
self._cache_init()
return self._cache_get_entry(self.CACHE_NAME_PARENTS, parent_alias, []) | Returns child IDs of the given parent category
:param str parent_alias: Parent category alias
:rtype: list
:return: a list of child IDs | Below is the the instruction that describes the task:
### Input:
Returns child IDs of the given parent category
:param str parent_alias: Parent category alias
:rtype: list
:return: a list of child IDs
### Response:
def get_child_ids(self, parent_alias):
"""Returns child IDs of the given parent category
:param str parent_alias: Parent category alias
:rtype: list
:return: a list of child IDs
"""
self._cache_init()
return self._cache_get_entry(self.CACHE_NAME_PARENTS, parent_alias, []) |
def _variable(lexer):
"""Return a variable expression."""
names = _names(lexer)
tok = next(lexer)
# NAMES '[' ... ']'
if isinstance(tok, LBRACK):
indices = _indices(lexer)
_expect_token(lexer, {RBRACK})
# NAMES
else:
lexer.unpop_token(tok)
indices = tuple()
return ('var', names, indices) | Return a variable expression. | Below is the the instruction that describes the task:
### Input:
Return a variable expression.
### Response:
def _variable(lexer):
"""Return a variable expression."""
names = _names(lexer)
tok = next(lexer)
# NAMES '[' ... ']'
if isinstance(tok, LBRACK):
indices = _indices(lexer)
_expect_token(lexer, {RBRACK})
# NAMES
else:
lexer.unpop_token(tok)
indices = tuple()
return ('var', names, indices) |
def _process_container_metric(self, type, metric_name, metric, scraper_config):
"""
Takes a simple metric about a container, reports it as a rate or gauge.
If several series are found for a given container, values are summed before submission.
"""
if metric.type not in METRIC_TYPES:
self.log.error("Metric type %s unsupported for metric %s" % (metric.type, metric.name))
return
samples = self._sum_values_by_context(metric, self._get_entity_id_if_container_metric)
for c_id, sample in iteritems(samples):
pod_uid = self._get_pod_uid(sample[self.SAMPLE_LABELS])
if self.pod_list_utils.is_excluded(c_id, pod_uid):
continue
tags = tagger.tag(c_id, tagger.HIGH)
tags += scraper_config['custom_tags']
# FIXME we are forced to do that because the Kubelet PodList isn't updated
# for static pods, see https://github.com/kubernetes/kubernetes/pull/59948
pod = self._get_pod_by_metric_label(sample[self.SAMPLE_LABELS])
if pod is not None and is_static_pending_pod(pod):
tags += tagger.tag('kubernetes_pod://%s' % pod["metadata"]["uid"], tagger.HIGH)
tags += self._get_kube_container_name(sample[self.SAMPLE_LABELS])
tags = list(set(tags))
val = sample[self.SAMPLE_VALUE]
if "rate" == type:
self.rate(metric_name, val, tags)
elif "gauge" == type:
self.gauge(metric_name, val, tags) | Takes a simple metric about a container, reports it as a rate or gauge.
If several series are found for a given container, values are summed before submission. | Below is the the instruction that describes the task:
### Input:
Takes a simple metric about a container, reports it as a rate or gauge.
If several series are found for a given container, values are summed before submission.
### Response:
def _process_container_metric(self, type, metric_name, metric, scraper_config):
"""
Takes a simple metric about a container, reports it as a rate or gauge.
If several series are found for a given container, values are summed before submission.
"""
if metric.type not in METRIC_TYPES:
self.log.error("Metric type %s unsupported for metric %s" % (metric.type, metric.name))
return
samples = self._sum_values_by_context(metric, self._get_entity_id_if_container_metric)
for c_id, sample in iteritems(samples):
pod_uid = self._get_pod_uid(sample[self.SAMPLE_LABELS])
if self.pod_list_utils.is_excluded(c_id, pod_uid):
continue
tags = tagger.tag(c_id, tagger.HIGH)
tags += scraper_config['custom_tags']
# FIXME we are forced to do that because the Kubelet PodList isn't updated
# for static pods, see https://github.com/kubernetes/kubernetes/pull/59948
pod = self._get_pod_by_metric_label(sample[self.SAMPLE_LABELS])
if pod is not None and is_static_pending_pod(pod):
tags += tagger.tag('kubernetes_pod://%s' % pod["metadata"]["uid"], tagger.HIGH)
tags += self._get_kube_container_name(sample[self.SAMPLE_LABELS])
tags = list(set(tags))
val = sample[self.SAMPLE_VALUE]
if "rate" == type:
self.rate(metric_name, val, tags)
elif "gauge" == type:
self.gauge(metric_name, val, tags) |
def open_in_composer(self):
"""Open in layout designer a given MapReport instance.
.. versionadded: 4.3.0
"""
impact_layer = self.impact_function.analysis_impacted
report_path = dirname(impact_layer.source())
impact_report = self.impact_function.impact_report
custom_map_report_metadata = impact_report.metadata
custom_map_report_product = (
custom_map_report_metadata.component_by_tags(
[final_product_tag, pdf_product_tag]))
for template_path in self.retrieve_paths(
custom_map_report_product,
report_path=report_path,
suffix='.qpt'):
layout = QgsPrintLayout(QgsProject.instance())
with open(template_path) as template_file:
template_content = template_file.read()
document = QtXml.QDomDocument()
document.setContent(template_content)
# load layout object
rwcontext = QgsReadWriteContext()
load_status = layout.loadFromTemplate(document, rwcontext)
if not load_status:
# noinspection PyCallByClass,PyTypeChecker
QtWidgets.QMessageBox.warning(
self,
tr('InaSAFE'),
tr('Error loading template: %s') % template_path)
return
QgsProject.instance().layoutManager().addLayout(layout)
self.iface.openLayoutDesigner(layout) | Open in layout designer a given MapReport instance.
.. versionadded: 4.3.0 | Below is the the instruction that describes the task:
### Input:
Open in layout designer a given MapReport instance.
.. versionadded: 4.3.0
### Response:
def open_in_composer(self):
"""Open in layout designer a given MapReport instance.
.. versionadded: 4.3.0
"""
impact_layer = self.impact_function.analysis_impacted
report_path = dirname(impact_layer.source())
impact_report = self.impact_function.impact_report
custom_map_report_metadata = impact_report.metadata
custom_map_report_product = (
custom_map_report_metadata.component_by_tags(
[final_product_tag, pdf_product_tag]))
for template_path in self.retrieve_paths(
custom_map_report_product,
report_path=report_path,
suffix='.qpt'):
layout = QgsPrintLayout(QgsProject.instance())
with open(template_path) as template_file:
template_content = template_file.read()
document = QtXml.QDomDocument()
document.setContent(template_content)
# load layout object
rwcontext = QgsReadWriteContext()
load_status = layout.loadFromTemplate(document, rwcontext)
if not load_status:
# noinspection PyCallByClass,PyTypeChecker
QtWidgets.QMessageBox.warning(
self,
tr('InaSAFE'),
tr('Error loading template: %s') % template_path)
return
QgsProject.instance().layoutManager().addLayout(layout)
self.iface.openLayoutDesigner(layout) |
def replicated_dataset(dataset, weights, n=None):
"Copy dataset, replicating each example in proportion to its weight."
n = n or len(dataset.examples)
result = copy.copy(dataset)
result.examples = weighted_replicate(dataset.examples, weights, n)
return result | Copy dataset, replicating each example in proportion to its weight. | Below is the the instruction that describes the task:
### Input:
Copy dataset, replicating each example in proportion to its weight.
### Response:
def replicated_dataset(dataset, weights, n=None):
"Copy dataset, replicating each example in proportion to its weight."
n = n or len(dataset.examples)
result = copy.copy(dataset)
result.examples = weighted_replicate(dataset.examples, weights, n)
return result |
def collect(self):
"""Collect statistics from /proc/self/mountstats.
Currently, we do fairly naive parsing and do not actually check
the statvers value returned by mountstats.
"""
if str_to_bool(self.config['use_sudo']):
if not os.access(self.config['sudo_cmd'], os.X_OK):
self.log.error("Cannot find or exec %s"
% self.config['sudo_cmd'])
return None
command = [self.config['sudo_cmd'], '/bin/cat', self.MOUNTSTATS]
p = subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()[0][:-1]
lines = p.split("\n")
else:
if not os.access(self.MOUNTSTATS, os.R_OK):
self.log.error("Cannot read path %s" % self.MOUNTSTATS)
return None
f = open(self.MOUNTSTATS)
lines = f.readlines()
f.close()
path = None
for line in lines:
tokens = line.split()
if len(tokens) == 0:
continue
if tokens[0] == 'device':
path = tokens[4]
skip = False
if self.exclude_reg:
skip = self.exclude_reg.match(path)
if self.include_reg:
skip = not self.include_reg.match(path)
if skip:
self.log.debug("Ignoring %s", path)
else:
self.log.debug("Keeping %s", path)
path = path.replace('.', '_')
path = path.replace('/', '_')
elif skip:
# If we are in a skip state, don't pay any attention to
# anything that isn't the next device line
continue
elif tokens[0] == 'events:':
for i in range(0, len(self.EVENTS_MAP)):
metric_name = "%s.events.%s" % (path, self.EVENTS_MAP[i])
metric_value = long(tokens[i + 1])
self.publish_counter(metric_name, metric_value)
elif tokens[0] == 'bytes:':
for i in range(0, len(self.BYTES_MAP)):
metric_name = "%s.bytes.%s" % (path, self.BYTES_MAP[i])
metric_value = long(tokens[i + 1])
self.publish_counter(metric_name, metric_value)
elif tokens[0] == 'xprt:':
proto = tokens[1]
if not self.XPRT_MAP[proto]:
self.log.error("Unknown protocol %s", proto)
continue
for i in range(0, len(self.XPRT_MAP[proto])):
metric_name = "%s.xprt.%s.%s" % (path, proto,
self.XPRT_MAP[proto][i])
metric_value = long(tokens[i + 2])
self.publish_counter(metric_name, metric_value)
elif tokens[0][:-1] in self.RPCS_MAP:
rpc = tokens[0][:-1]
ops = long(tokens[1])
rtt = long(tokens[7])
exe = long(tokens[8])
metric_fmt = "%s.rpc.%s.%s"
ops_name = metric_fmt % (path, rpc.lower(), 'ops')
rtt_name = metric_fmt % (path, rpc.lower(), 'rtt')
exe_name = metric_fmt % (path, rpc.lower(), 'exe')
self.publish_counter(ops_name, ops)
self.publish_counter(rtt_name, rtt)
self.publish_counter(exe_name, exe) | Collect statistics from /proc/self/mountstats.
Currently, we do fairly naive parsing and do not actually check
the statvers value returned by mountstats. | Below is the the instruction that describes the task:
### Input:
Collect statistics from /proc/self/mountstats.
Currently, we do fairly naive parsing and do not actually check
the statvers value returned by mountstats.
### Response:
def collect(self):
"""Collect statistics from /proc/self/mountstats.
Currently, we do fairly naive parsing and do not actually check
the statvers value returned by mountstats.
"""
if str_to_bool(self.config['use_sudo']):
if not os.access(self.config['sudo_cmd'], os.X_OK):
self.log.error("Cannot find or exec %s"
% self.config['sudo_cmd'])
return None
command = [self.config['sudo_cmd'], '/bin/cat', self.MOUNTSTATS]
p = subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()[0][:-1]
lines = p.split("\n")
else:
if not os.access(self.MOUNTSTATS, os.R_OK):
self.log.error("Cannot read path %s" % self.MOUNTSTATS)
return None
f = open(self.MOUNTSTATS)
lines = f.readlines()
f.close()
path = None
for line in lines:
tokens = line.split()
if len(tokens) == 0:
continue
if tokens[0] == 'device':
path = tokens[4]
skip = False
if self.exclude_reg:
skip = self.exclude_reg.match(path)
if self.include_reg:
skip = not self.include_reg.match(path)
if skip:
self.log.debug("Ignoring %s", path)
else:
self.log.debug("Keeping %s", path)
path = path.replace('.', '_')
path = path.replace('/', '_')
elif skip:
# If we are in a skip state, don't pay any attention to
# anything that isn't the next device line
continue
elif tokens[0] == 'events:':
for i in range(0, len(self.EVENTS_MAP)):
metric_name = "%s.events.%s" % (path, self.EVENTS_MAP[i])
metric_value = long(tokens[i + 1])
self.publish_counter(metric_name, metric_value)
elif tokens[0] == 'bytes:':
for i in range(0, len(self.BYTES_MAP)):
metric_name = "%s.bytes.%s" % (path, self.BYTES_MAP[i])
metric_value = long(tokens[i + 1])
self.publish_counter(metric_name, metric_value)
elif tokens[0] == 'xprt:':
proto = tokens[1]
if not self.XPRT_MAP[proto]:
self.log.error("Unknown protocol %s", proto)
continue
for i in range(0, len(self.XPRT_MAP[proto])):
metric_name = "%s.xprt.%s.%s" % (path, proto,
self.XPRT_MAP[proto][i])
metric_value = long(tokens[i + 2])
self.publish_counter(metric_name, metric_value)
elif tokens[0][:-1] in self.RPCS_MAP:
rpc = tokens[0][:-1]
ops = long(tokens[1])
rtt = long(tokens[7])
exe = long(tokens[8])
metric_fmt = "%s.rpc.%s.%s"
ops_name = metric_fmt % (path, rpc.lower(), 'ops')
rtt_name = metric_fmt % (path, rpc.lower(), 'rtt')
exe_name = metric_fmt % (path, rpc.lower(), 'exe')
self.publish_counter(ops_name, ops)
self.publish_counter(rtt_name, rtt)
self.publish_counter(exe_name, exe) |
def MFI(frame, n=14, high_col='high', low_col='low', close_col='close', vol_col='Volume'):
"""money flow inedx"""
return _frame_to_series(frame, [high_col, low_col, close_col, vol_col], talib.MFI, n) | money flow index | Below is the the instruction that describes the task:
### Input:
money flow index
### Response:
def MFI(frame, n=14, high_col='high', low_col='low', close_col='close', vol_col='Volume'):
"""money flow inedx"""
return _frame_to_series(frame, [high_col, low_col, close_col, vol_col], talib.MFI, n) |
def participate(self):
"""Finish reading and send text"""
try:
logger.info("Entering participate method")
ready = WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.ID, "finish-reading"))
)
stimulus = self.driver.find_element_by_id("stimulus")
story = stimulus.find_element_by_id("story")
story_text = story.text
logger.info("Stimulus text:")
logger.info(story_text)
ready.click()
submit = WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.ID, "submit-response"))
)
textarea = WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.ID, "reproduction"))
)
textarea.clear()
text = self.transform_text(story_text)
logger.info("Transformed text:")
logger.info(text)
textarea.send_keys(text)
submit.click()
return True
except TimeoutException:
return False | Finish reading and send text | Below is the the instruction that describes the task:
### Input:
Finish reading and send text
### Response:
def participate(self):
"""Finish reading and send text"""
try:
logger.info("Entering participate method")
ready = WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.ID, "finish-reading"))
)
stimulus = self.driver.find_element_by_id("stimulus")
story = stimulus.find_element_by_id("story")
story_text = story.text
logger.info("Stimulus text:")
logger.info(story_text)
ready.click()
submit = WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.ID, "submit-response"))
)
textarea = WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.ID, "reproduction"))
)
textarea.clear()
text = self.transform_text(story_text)
logger.info("Transformed text:")
logger.info(text)
textarea.send_keys(text)
submit.click()
return True
except TimeoutException:
return False |