code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
---|---|---|
def json(self):
"""
Return JSON representation of object.
"""
if self.meta_type == 'list':
ret = []
for dat in self._list:
if not isinstance(dat, composite):
ret.append(dat)
else:
ret.append(dat.json())
return ret
elif self.meta_type == 'dict':
ret = {}
for key in self._dict:
if not isinstance(self._dict[key], composite):
ret[key] = self._dict[key]
else:
ret[key] = self._dict[key].json()
return ret | Return JSON representation of object. | Below is the instruction that describes the task:
### Input:
Return JSON representation of object.
### Response:
def json(self):
"""
Return JSON representation of object.
"""
if self.meta_type == 'list':
ret = []
for dat in self._list:
if not isinstance(dat, composite):
ret.append(dat)
else:
ret.append(dat.json())
return ret
elif self.meta_type == 'dict':
ret = {}
for key in self._dict:
if not isinstance(self._dict[key], composite):
ret[key] = self._dict[key]
else:
ret[key] = self._dict[key].json()
return ret |
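For context, the recursion above can be reduced to a minimal standalone sketch; the `Composite` class below is a hypothetical stand-in for the row's `composite` type, which is not defined in this excerpt:

class Composite:
    # Hypothetical stand-in: wraps either a list or a dict and mirrors
    # the meta_type/_list/_dict attributes that json() reads above.
    def __init__(self, data):
        self.meta_type = 'list' if isinstance(data, list) else 'dict'
        self._list = data if self.meta_type == 'list' else []
        self._dict = data if self.meta_type == 'dict' else {}

    def json(self):
        if self.meta_type == 'list':
            return [d.json() if isinstance(d, Composite) else d for d in self._list]
        return {k: (v.json() if isinstance(v, Composite) else v)
                for k, v in self._dict.items()}

nested = Composite({'a': Composite([1, 2]), 'b': 3})
print(nested.json())  # {'a': [1, 2], 'b': 3}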
def parse_date(datestring):
"""Attepmts to parse an ISO8601 formatted ``datestring``.
Returns a ``datetime.datetime`` object.
"""
datestring = str(datestring).strip()
if not datestring[0].isdigit():
raise ParseError()
if 'W' in datestring.upper():
try:
datestring = datestring[:-1] + str(int(datestring[-1:]) -1)
except:
pass
for regex, pattern in DATE_FORMATS:
if regex.match(datestring):
found = regex.search(datestring).groupdict()
dt = datetime.utcnow().strptime(found['matched'], pattern)
if 'fraction' in found and found['fraction'] is not None:
dt = dt.replace(microsecond=int(found['fraction'][1:]))
if 'timezone' in found and found['timezone'] is not None:
dt = dt.replace(tzinfo=Timezone(found.get('timezone', '')))
return dt
return parse_time(datestring) | Attempts to parse an ISO8601 formatted ``datestring``.
Returns a ``datetime.datetime`` object. | Below is the instruction that describes the task:
### Input:
Attempts to parse an ISO8601 formatted ``datestring``.
Returns a ``datetime.datetime`` object.
### Response:
def parse_date(datestring):
"""Attepmts to parse an ISO8601 formatted ``datestring``.
Returns a ``datetime.datetime`` object.
"""
datestring = str(datestring).strip()
if not datestring[0].isdigit():
raise ParseError()
if 'W' in datestring.upper():
try:
datestring = datestring[:-1] + str(int(datestring[-1:]) -1)
except:
pass
for regex, pattern in DATE_FORMATS:
if regex.match(datestring):
found = regex.search(datestring).groupdict()
dt = datetime.utcnow().strptime(found['matched'], pattern)
if 'fraction' in found and found['fraction'] is not None:
dt = dt.replace(microsecond=int(found['fraction'][1:]))
if 'timezone' in found and found['timezone'] is not None:
dt = dt.replace(tzinfo=Timezone(found.get('timezone', '')))
return dt
return parse_time(datestring) |
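A rough usage sketch of the row above: `DATE_FORMATS`, `Timezone`, `ParseError`, and `parse_time` are module-level names not shown here, and `datetime.utcnow().strptime(...)` behaves the same as the plain class method `datetime.strptime(...)`. For the common `YYYY-MM-DDTHH:MM:SS` form, the matched branch reduces to:

from datetime import datetime

# Illustrative only: the real lookup is table-driven via DATE_FORMATS.
dt = datetime.strptime("2014-01-15T10:30:00", "%Y-%m-%dT%H:%M:%S")
print(dt)  # 2014-01-15 10:30:00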
def _generate_cpu_stats():
"""Read and display processor name """
cpu_name = urwid.Text("CPU Name N/A", align="center")
try:
cpu_name = urwid.Text(get_processor_name().strip(), align="center")
except OSError:
logging.info("CPU name not available")
return [urwid.Text(('bold text', "CPU Detected"),
align="center"), cpu_name, urwid.Divider()] | Read and display processor name | Below is the the instruction that describes the task:
### Input:
Read and display processor name
### Response:
def _generate_cpu_stats():
"""Read and display processor name """
cpu_name = urwid.Text("CPU Name N/A", align="center")
try:
cpu_name = urwid.Text(get_processor_name().strip(), align="center")
except OSError:
logging.info("CPU name not available")
return [urwid.Text(('bold text', "CPU Detected"),
align="center"), cpu_name, urwid.Divider()] |
def one_hot(cls, n, l):
"""
n: position of "hot"
l: length of vector
"""
v = [0.0] * l
v[n] = 1.0
return Vector(v) | n: position of "hot"
l: length of vector | Below is the instruction that describes the task:
### Input:
n: position of "hot"
l: length of vector
### Response:
def one_hot(cls, n, l):
"""
n: position of "hot"
l: length of vector
"""
v = [0.0] * l
v[n] = 1.0
return Vector(v) |
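A minimal sketch of the same idea without the `Vector` wrapper (which is not defined in this excerpt):

def one_hot_list(n, l):
    # Position n gets 1.0, every other position stays 0.0.
    v = [0.0] * l
    v[n] = 1.0
    return v

print(one_hot_list(2, 5))  # [0.0, 0.0, 1.0, 0.0, 0.0]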
def _compute_intra_event_alpha(self, C, vs30, pga1100):
"""
Returns the linearised functional relationship between fsite and
pga1100, determined from the partial derivative defined on equation 17
on page 148
"""
alpha = np.zeros_like(vs30, dtype=float)
idx = vs30 < C['k1']
if np.any(idx):
temp1 = (pga1100[idx] +
C['c'] * (vs30[idx] / C['k1']) ** C['n']) ** -1.
temp1 = temp1 - ((pga1100[idx] + C['c']) ** -1.)
alpha[idx] = C['k2'] * pga1100[idx] * temp1
return alpha | Returns the linearised functional relationship between fsite and
pga1100, determined from the partial derivative defined on equation 17
on page 148 | Below is the instruction that describes the task:
### Input:
Returns the linearised functional relationship between fsite and
pga1100, determined from the partial derivative defined on equation 17
on page 148
### Response:
def _compute_intra_event_alpha(self, C, vs30, pga1100):
"""
Returns the linearised functional relationship between fsite and
pga1100, determined from the partial derivative defined on equation 17
on page 148
"""
alpha = np.zeros_like(vs30, dtype=float)
idx = vs30 < C['k1']
if np.any(idx):
temp1 = (pga1100[idx] +
C['c'] * (vs30[idx] / C['k1']) ** C['n']) ** -1.
temp1 = temp1 - ((pga1100[idx] + C['c']) ** -1.)
alpha[idx] = C['k2'] * pga1100[idx] * temp1
return alpha |
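Transcribing the `vs30 < k1` branch of the code directly into a formula (a transcription of the code, not a check against the cited equation 17):

\alpha =
\begin{cases}
k_2 \,\mathrm{pga}_{1100}\left[\left(\mathrm{pga}_{1100} + c\,(v_{S30}/k_1)^{n}\right)^{-1} - \left(\mathrm{pga}_{1100} + c\right)^{-1}\right], & v_{S30} < k_1,\\
0, & \text{otherwise.}
\end{cases}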
def RotateServerKey(cn=u"grr", keylength=4096):
"""This function creates and installs a new server key.
Note that
- Clients might experience intermittent connection problems after
the server keys rotated.
- It's not possible to go back to an earlier key. Clients that see a
new certificate will remember the cert's serial number and refuse
to accept any certificate with a smaller serial number from that
point on.
Args:
cn: The common name for the server to use.
keylength: Length in bits for the new server key.
Raises:
ValueError: There is no CA cert in the config. Probably the server
still needs to be initialized.
"""
ca_certificate = config.CONFIG["CA.certificate"]
ca_private_key = config.CONFIG["PrivateKeys.ca_key"]
if not ca_certificate or not ca_private_key:
raise ValueError("No existing CA certificate found.")
# Check the current certificate serial number
existing_cert = config.CONFIG["Frontend.certificate"]
serial_number = existing_cert.GetSerialNumber() + 1
EPrint("Generating new server key (%d bits, cn '%s', serial # %d)" %
(keylength, cn, serial_number))
server_private_key = rdf_crypto.RSAPrivateKey.GenerateKey(bits=keylength)
server_cert = key_utils.MakeCASignedCert(
str(cn),
server_private_key,
ca_certificate,
ca_private_key,
serial_number=serial_number)
EPrint("Updating configuration.")
config.CONFIG.Set("Frontend.certificate", server_cert.AsPEM())
config.CONFIG.Set("PrivateKeys.server_key", server_private_key.AsPEM())
config.CONFIG.Write()
EPrint("Server key rotated, please restart the GRR Frontends.") | This function creates and installs a new server key.
Note that
- Clients might experience intermittent connection problems after
the server keys rotated.
- It's not possible to go back to an earlier key. Clients that see a
new certificate will remember the cert's serial number and refuse
to accept any certificate with a smaller serial number from that
point on.
Args:
cn: The common name for the server to use.
keylength: Length in bits for the new server key.
Raises:
ValueError: There is no CA cert in the config. Probably the server
still needs to be initialized. | Below is the instruction that describes the task:
### Input:
This function creates and installs a new server key.
Note that
- Clients might experience intermittent connection problems after
the server keys rotated.
- It's not possible to go back to an earlier key. Clients that see a
new certificate will remember the cert's serial number and refuse
to accept any certificate with a smaller serial number from that
point on.
Args:
cn: The common name for the server to use.
keylength: Length in bits for the new server key.
Raises:
ValueError: There is no CA cert in the config. Probably the server
still needs to be initialized.
### Response:
def RotateServerKey(cn=u"grr", keylength=4096):
"""This function creates and installs a new server key.
Note that
- Clients might experience intermittent connection problems after
the server keys rotated.
- It's not possible to go back to an earlier key. Clients that see a
new certificate will remember the cert's serial number and refuse
to accept any certificate with a smaller serial number from that
point on.
Args:
cn: The common name for the server to use.
keylength: Length in bits for the new server key.
Raises:
ValueError: There is no CA cert in the config. Probably the server
still needs to be initialized.
"""
ca_certificate = config.CONFIG["CA.certificate"]
ca_private_key = config.CONFIG["PrivateKeys.ca_key"]
if not ca_certificate or not ca_private_key:
raise ValueError("No existing CA certificate found.")
# Check the current certificate serial number
existing_cert = config.CONFIG["Frontend.certificate"]
serial_number = existing_cert.GetSerialNumber() + 1
EPrint("Generating new server key (%d bits, cn '%s', serial # %d)" %
(keylength, cn, serial_number))
server_private_key = rdf_crypto.RSAPrivateKey.GenerateKey(bits=keylength)
server_cert = key_utils.MakeCASignedCert(
str(cn),
server_private_key,
ca_certificate,
ca_private_key,
serial_number=serial_number)
EPrint("Updating configuration.")
config.CONFIG.Set("Frontend.certificate", server_cert.AsPEM())
config.CONFIG.Set("PrivateKeys.server_key", server_private_key.AsPEM())
config.CONFIG.Write()
EPrint("Server key rotated, please restart the GRR Frontends.") |
def update_token(self):
"""If a username and password are present - attempt to use them to
request a fresh SAS token.
"""
if not self.username or not self.password:
raise errors.TokenExpired("Unable to refresh token - no username or password.")
encoded_uri = compat.quote_plus(self.uri).encode(self._encoding) # pylint: disable=no-member
encoded_key = compat.quote_plus(self.username).encode(self._encoding) # pylint: disable=no-member
self.expires_at = time.time() + self.expires_in.seconds
self.token = utils.create_sas_token(
encoded_key,
self.password.encode(self._encoding),
encoded_uri,
self.expires_in) | If a username and password are present - attempt to use them to
request a fresh SAS token. | Below is the instruction that describes the task:
### Input:
If a username and password are present - attempt to use them to
request a fresh SAS token.
### Response:
def update_token(self):
"""If a username and password are present - attempt to use them to
request a fresh SAS token.
"""
if not self.username or not self.password:
raise errors.TokenExpired("Unable to refresh token - no username or password.")
encoded_uri = compat.quote_plus(self.uri).encode(self._encoding) # pylint: disable=no-member
encoded_key = compat.quote_plus(self.username).encode(self._encoding) # pylint: disable=no-member
self.expires_at = time.time() + self.expires_in.seconds
self.token = utils.create_sas_token(
encoded_key,
self.password.encode(self._encoding),
encoded_uri,
self.expires_in) |
def add(self, name, *vals):
'''
Add values as iter() compatible items in the current scope frame.
'''
item = self.frames[-1].get(name)
if item is None:
self.frames[-1][name] = item = []
item.extend(vals) | Add values as iter() compatible items in the current scope frame. | Below is the instruction that describes the task:
### Input:
Add values as iter() compatible items in the current scope frame.
### Response:
def add(self, name, *vals):
'''
Add values as iter() compatible items in the current scope frame.
'''
item = self.frames[-1].get(name)
if item is None:
self.frames[-1][name] = item = []
item.extend(vals) |
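A self-contained usage sketch; the enclosing class is not shown in this excerpt, so `Scope` below is a hypothetical host exposing the `frames` list of dicts that `add` expects:

class Scope:
    def __init__(self):
        self.frames = [{}]  # innermost scope frame is frames[-1]

    def add(self, name, *vals):
        item = self.frames[-1].get(name)
        if item is None:
            self.frames[-1][name] = item = []
        item.extend(vals)

s = Scope()
s.add('hosts', 'a', 'b')
s.add('hosts', 'c')
print(s.frames[-1]['hosts'])  # ['a', 'b', 'c']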
def _bytelist2longBigEndian(list):
"Transform a list of characters into a list of longs."
imax = len(list) // 4
hl = [0] * imax
j = 0
i = 0
while i < imax:
b0 = ord(list[j]) << 24
b1 = ord(list[j+1]) << 16
b2 = ord(list[j+2]) << 8
b3 = ord(list[j+3])
hl[i] = b0 | b1 | b2 | b3
i = i+1
j = j+4
return hl | Transform a list of characters into a list of longs. | Below is the instruction that describes the task:
### Input:
Transform a list of characters into a list of longs.
### Response:
def _bytelist2longBigEndian(list):
"Transform a list of characters into a list of longs."
imax = len(list) // 4
hl = [0] * imax
j = 0
i = 0
while i < imax:
b0 = ord(list[j]) << 24
b1 = ord(list[j+1]) << 16
b2 = ord(list[j+2]) << 8
b3 = ord(list[j+3])
hl[i] = b0 | b1 | b2 | b3
i = i+1
j = j+4
return hl |
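For byte strings, the same big-endian packing can be expressed with the standard struct module (a sketch; the original operates on a list of one-character strings, four characters per 32-bit long):

import struct

data = b'\x00\x00\x00\x01\xff\x00\x00\x00'
count = len(data) // 4
longs = list(struct.unpack('>%dI' % count, data[:count * 4]))
print(longs)  # [1, 4278190080]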
def parse_response(resp):
""" Method to parse response from the Optimizely API and
return results as JSON. Errors are thrown for various
errors that the API can throw.
"""
if resp.status_code in [200, 201, 202]:
return resp.json()
elif resp.status_code == 204:
return None
elif resp.status_code == 400:
raise error.BadRequestError(resp.text)
elif resp.status_code == 401:
raise error.UnauthorizedError(resp.text)
elif resp.status_code == 403:
raise error.ForbiddenError(resp.text)
elif resp.status_code == 404:
raise error.NotFoundError(resp.text)
elif resp.status_code == 429:
raise error.TooManyRequestsError(resp.text)
elif resp.status_code == 503:
raise error.ServiceUnavailableError(resp.text)
else:
raise error.OptimizelyError(resp.text) | Method to parse response from the Optimizely API and
return results as JSON. Errors are thrown for various
errors that the API can throw. | Below is the instruction that describes the task:
### Input:
Method to parse response from the Optimizely API and
return results as JSON. Errors are thrown for various
errors that the API can throw.
### Response:
def parse_response(resp):
""" Method to parse response from the Optimizely API and
return results as JSON. Errors are thrown for various
errors that the API can throw.
"""
if resp.status_code in [200, 201, 202]:
return resp.json()
elif resp.status_code == 204:
return None
elif resp.status_code == 400:
raise error.BadRequestError(resp.text)
elif resp.status_code == 401:
raise error.UnauthorizedError(resp.text)
elif resp.status_code == 403:
raise error.ForbiddenError(resp.text)
elif resp.status_code == 404:
raise error.NotFoundError(resp.text)
elif resp.status_code == 429:
raise error.TooManyRequestsError(resp.text)
elif resp.status_code == 503:
raise error.ServiceUnavailableError(resp.text)
else:
raise error.OptimizelyError(resp.text) |
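A self-contained call sketch using a stub response object, assuming the `parse_response` above is importable; only the 200/204 branches are exercised here, so the library's `error` classes are never constructed:

class StubResponse:
    # Minimal stand-in for a requests.Response-like object.
    def __init__(self, status_code, payload=None, text=''):
        self.status_code = status_code
        self._payload = payload
        self.text = text

    def json(self):
        return self._payload

print(parse_response(StubResponse(200, {'id': 1})))  # {'id': 1}
print(parse_response(StubResponse(204)))             # None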
def _set_igmps_interface(self, v, load=False):
"""
Setter method for igmps_interface, mapped from YANG variable /interface_vlan/vlan/ip/igmpVlan/snooping/igmps_mrouter/igmps_interface (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_igmps_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igmps_interface() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("igmps_if_type igmps_value",igmps_interface.igmps_interface, yang_name="igmps-interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='igmps-if-type igmps-value', extensions={u'tailf-common': {u'info': u'Interface to use', u'cli-suppress-mode': None, u'alt-name': u'interface', u'cli-suppress-list-no': None}}), is_container='list', yang_name="igmps-interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface to use', u'cli-suppress-mode': None, u'alt-name': u'interface', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """igmps_interface must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("igmps_if_type igmps_value",igmps_interface.igmps_interface, yang_name="igmps-interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='igmps-if-type igmps-value', extensions={u'tailf-common': {u'info': u'Interface to use', u'cli-suppress-mode': None, u'alt-name': u'interface', u'cli-suppress-list-no': None}}), is_container='list', yang_name="igmps-interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface to use', u'cli-suppress-mode': None, u'alt-name': u'interface', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='list', is_config=True)""",
})
self.__igmps_interface = t
if hasattr(self, '_set'):
self._set() | Setter method for igmps_interface, mapped from YANG variable /interface_vlan/vlan/ip/igmpVlan/snooping/igmps_mrouter/igmps_interface (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_igmps_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igmps_interface() directly. | Below is the instruction that describes the task:
### Input:
Setter method for igmps_interface, mapped from YANG variable /interface_vlan/vlan/ip/igmpVlan/snooping/igmps_mrouter/igmps_interface (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_igmps_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igmps_interface() directly.
### Response:
def _set_igmps_interface(self, v, load=False):
"""
Setter method for igmps_interface, mapped from YANG variable /interface_vlan/vlan/ip/igmpVlan/snooping/igmps_mrouter/igmps_interface (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_igmps_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igmps_interface() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("igmps_if_type igmps_value",igmps_interface.igmps_interface, yang_name="igmps-interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='igmps-if-type igmps-value', extensions={u'tailf-common': {u'info': u'Interface to use', u'cli-suppress-mode': None, u'alt-name': u'interface', u'cli-suppress-list-no': None}}), is_container='list', yang_name="igmps-interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface to use', u'cli-suppress-mode': None, u'alt-name': u'interface', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """igmps_interface must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("igmps_if_type igmps_value",igmps_interface.igmps_interface, yang_name="igmps-interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='igmps-if-type igmps-value', extensions={u'tailf-common': {u'info': u'Interface to use', u'cli-suppress-mode': None, u'alt-name': u'interface', u'cli-suppress-list-no': None}}), is_container='list', yang_name="igmps-interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface to use', u'cli-suppress-mode': None, u'alt-name': u'interface', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='list', is_config=True)""",
})
self.__igmps_interface = t
if hasattr(self, '_set'):
self._set() |
def coords(self):
"""The current absolute coordinates of the touch event,
in mm from the top left corner of the device.
To get the corresponding output screen coordinates, use
:meth:`transform_coords`.
For events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`,
:attr:`~libinput.constant.EventType.TOUCH_MOTION`, this property
raises :exc:`AttributeError`.
Returns:
(float, float): The current absolute (x, y) coordinates.
Raises:
AttributeError
"""
if self.type not in {EventType.TOUCH_DOWN, EventType.TOUCH_MOTION}:
raise AttributeError(_wrong_prop.format(self.type))
x = self._libinput.libinput_event_touch_get_x(self._handle)
y = self._libinput.libinput_event_touch_get_y(self._handle)
return x, y | The current absolute coordinates of the touch event,
in mm from the top left corner of the device.
To get the corresponding output screen coordinates, use
:meth:`transform_coords`.
For events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`,
:attr:`~libinput.constant.EventType.TOUCH_MOTION`, this property
raises :exc:`AttributeError`.
Returns:
(float, float): The current absolute (x, y) coordinates.
Raises:
AttributeError | Below is the instruction that describes the task:
### Input:
The current absolute coordinates of the touch event,
in mm from the top left corner of the device.
To get the corresponding output screen coordinates, use
:meth:`transform_coords`.
For events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`,
:attr:`~libinput.constant.EventType.TOUCH_MOTION`, this property
raises :exc:`AttributeError`.
Returns:
(float, float): The current absolute (x, y) coordinates.
Raises:
AttributeError
### Response:
def coords(self):
"""The current absolute coordinates of the touch event,
in mm from the top left corner of the device.
To get the corresponding output screen coordinates, use
:meth:`transform_coords`.
For events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`,
:attr:`~libinput.constant.EventType.TOUCH_MOTION`, this property
raises :exc:`AttributeError`.
Returns:
(float, float): The current absolute (x, y) coordinates.
Raises:
AttributeError
"""
if self.type not in {EventType.TOUCH_DOWN, EventType.TOUCH_MOTION}:
raise AttributeError(_wrong_prop.format(self.type))
x = self._libinput.libinput_event_touch_get_x(self._handle)
y = self._libinput.libinput_event_touch_get_y(self._handle)
return x, y |
def override_operation(self, operation, reason):
"""Re-Classify entry pair."""
prev_class = (self.local_classification, self.remote_classification)
prev_op = self.operation
assert operation != prev_op
assert operation in PAIR_OPERATIONS
if self.any_entry.target.synchronizer.verbose > 3:
write(
"override_operation({}, {}) -> {} ({})".format(
prev_class, prev_op, operation, reason
),
debug=True,
)
self.operation = operation
self.re_class_reason = reason | Re-Classify entry pair. | Below is the instruction that describes the task:
### Input:
Re-Classify entry pair.
### Response:
def override_operation(self, operation, reason):
"""Re-Classify entry pair."""
prev_class = (self.local_classification, self.remote_classification)
prev_op = self.operation
assert operation != prev_op
assert operation in PAIR_OPERATIONS
if self.any_entry.target.synchronizer.verbose > 3:
write(
"override_operation({}, {}) -> {} ({})".format(
prev_class, prev_op, operation, reason
),
debug=True,
)
self.operation = operation
self.re_class_reason = reason |
def taskfileinfo_task_data(tfi, role):
"""Return the data for task
:param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
:type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the task
:rtype: depending on role
:raises: None
"""
task = tfi.task
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
return task.name | Return the data for task
:param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
:type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the task
:rtype: depending on role
:raises: None | Below is the instruction that describes the task:
### Input:
Return the data for task
:param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
:type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the task
:rtype: depending on role
:raises: None
### Response:
def taskfileinfo_task_data(tfi, role):
"""Return the data for task
:param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
:type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the task
:rtype: depending on role
:raises: None
"""
task = tfi.task
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
return task.name |
def training_data(job_id):
'''Returns training_examples for a given job_id from offset to limit
If full_info parameter is greater than 0, will return extra architecture
info,
GET /jobs/139/vectors?offset=0&limit=10&full_info=1
{
"labeled_vectors": [{"vector":{"indices": {"0": 1}, "reductions": 3}, "label":0},
{"vector":{"indices": {"1": 1}, "reductions": 3}, "label":1},
...],
"vector_length": 3, # non-negative int or -1 if vector length is inconsistent
"num_labeled_vectors": 1600000, # non-negative int
"num_classes": 2, # pos integer, probably 2 or more
}
'''
offset = request.args.get('offset', 0)
limit = request.args.get('limit', 0)
cur.execute('SELECT vector,label FROM vectors WHERE job_id=%s OFFSET %s LIMIT %s',
(job_id, offset, limit))
training_examples = [{'vector':v,'label':l} for v,l in cur]
data = { 'labeled_vectors': training_examples }
if int(request.args.get('full_info', 0)) > 0:
cur.execute("SELECT vector->>'reductions' AS num_reductions FROM vectors WHERE job_id=%s GROUP BY num_reductions",
(job_id,))
unique_num_reductions = cur.fetchall() # [[5000]]
if len(unique_num_reductions) > 1:
# the vector length for this job is inconsistent! set vector_length
# to -1
data['vector_length'] = -1
else:
data['vector_length'] = unique_num_reductions[0][0]
cur.execute("SELECT count(*) FROM vectors WHERE job_id=%s",
(job_id,))
data['num_labeled_vectors'] = cur.fetchone()[0]
cur.execute("SELECT count(*) FROM (SELECT label FROM vectors WHERE job_id=%s GROUP BY label) AS all_vecs_for_job",
(job_id,))
data['num_classes'] = cur.fetchone()[0]
return jsonify(data) | Returns training_examples for a given job_id from offset to limit
If full_info parameter is greater than 0, will return extra architecture
info,
GET /jobs/139/vectors?offset=0&limit=10&full_info=1
{
"labeled_vectors": [{"vector":{"indices": {"0": 1}, "reductions": 3}, "label":0},
{"vector":{"indices": {"1": 1}, "reductions": 3}, "label":1},
...],
"vector_length": 3, # non-negative int or -1 if vector length is inconsistent
"num_labeled_vectors": 1600000, # non-negative int
"num_classes": 2, # pos integer, probably 2 or more
} | Below is the instruction that describes the task:
### Input:
Returns training_examples for a given job_id from offset to limit
If full_info parameter is greater than 0, will return extra architecture
info,
GET /jobs/139/vectors?offset=0&limit=10&full_info=1
{
"labeled_vectors": [{"vector":{"indices": {"0": 1}, "reductions": 3}, "label":0},
{"vector":{"indices": {"1": 1}, "reductions": 3}, "label":1},
...],
"vector_length": 3, # non-negative int or -1 if vector length is inconsistent
"num_labeled_vectors": 1600000, # non-negative int
"num_classes": 2, # pos integer, probably 2 or more
}
### Response:
def training_data(job_id):
'''Returns training_examples for a given job_id from offset to limit
If full_info parameter is greater than 0, will return extra architecture
info,
GET /jobs/139/vectors?offset=0&limit=10&full_info=1
{
"labeled_vectors": [{"vector":{"indices": {"0": 1}, "reductions": 3}, "label":0},
{"vector":{"indices": {"1": 1}, "reductions": 3}, "label":1},
...],
"vector_length": 3, # non-negative int or -1 if vector length is inconsistent
"num_labeled_vectors": 1600000, # non-negative int
"num_classes": 2, # pos integer, probably 2 or more
}
'''
offset = request.args.get('offset', 0)
limit = request.args.get('limit', 0)
cur.execute('SELECT vector,label FROM vectors WHERE job_id=%s OFFSET %s LIMIT %s',
(job_id, offset, limit))
training_examples = [{'vector':v,'label':l} for v,l in cur]
data = { 'labeled_vectors': training_examples }
if int(request.args.get('full_info', 0)) > 0:
cur.execute("SELECT vector->>'reductions' AS num_reductions FROM vectors WHERE job_id=%s GROUP BY num_reductions",
(job_id,))
unique_num_reductions = cur.fetchall() # [[5000]]
if len(unique_num_reductions) > 1:
# the vector length for this job is inconsistent! set vector_length
# to -1
data['vector_length'] = -1
else:
data['vector_length'] = unique_num_reductions[0][0]
cur.execute("SELECT count(*) FROM vectors WHERE job_id=%s",
(job_id,))
data['num_labeled_vectors'] = cur.fetchone()[0]
cur.execute("SELECT count(*) FROM (SELECT label FROM vectors WHERE job_id=%s GROUP BY label) AS all_vecs_for_job",
(job_id,))
data['num_classes'] = cur.fetchone()[0]
return jsonify(data) |
def relocate_image(self, new_ImageBase):
"""Apply the relocation information to the image using the provided new image base.
This method will apply the relocation information to the image. Given the new base,
all the relocations will be processed and both the raw data and the section's data
will be fixed accordingly.
The resulting image can be retrieved as well through the method:
get_memory_mapped_image()
In order to get something that would more closely match what could be found in memory
once the Windows loader finished its work.
"""
relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase
if self.OPTIONAL_HEADER.DATA_DIRECTORY[5].Size:
if not hasattr(self, 'DIRECTORY_ENTRY_BASERELOC'):
self.parse_data_directories(
directories=[DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_BASERELOC']])
for reloc in self.DIRECTORY_ENTRY_BASERELOC:
virtual_address = reloc.struct.VirtualAddress
size_of_block = reloc.struct.SizeOfBlock
# We iterate with an index because if the relocation is of type
# IMAGE_REL_BASED_HIGHADJ we need to also process the next entry
# at once and skip it for the next iteration
#
entry_idx = 0
while entry_idx<len(reloc.entries):
entry = reloc.entries[entry_idx]
entry_idx += 1
if entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_ABSOLUTE']:
# Nothing to do for this type of relocation
pass
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGH']:
# Fix the high 16-bits of a relocation
#
# Add high 16-bits of relocation_difference to the
# 16-bit value at RVA=entry.rva
self.set_word_at_rva(
entry.rva,
( self.get_word_at_rva(entry.rva) + relocation_difference>>16)&0xffff )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_LOW']:
# Fix the low 16-bits of a relocation
#
# Add low 16 bits of relocation_difference to the 16-bit value
# at RVA=entry.rva
self.set_word_at_rva(
entry.rva,
( self.get_word_at_rva(entry.rva) + relocation_difference)&0xffff)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHLOW']:
# Handle all high and low parts of a 32-bit relocation
#
# Add relocation_difference to the value at RVA=entry.rva
self.set_dword_at_rva(
entry.rva,
self.get_dword_at_rva(entry.rva)+relocation_difference)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHADJ']:
# Fix the high 16-bits of a relocation and adjust
#
# Add high 16-bits of relocation_difference to the 32-bit value
# composed from the (16-bit value at RVA=entry.rva)<<16 plus
# the 16-bit value at the next relocation entry.
#
# If the next entry is beyond the array's limits,
# abort... the table is corrupt
#
if entry_idx == len(reloc.entries):
break
next_entry = reloc.entries[entry_idx]
entry_idx += 1
self.set_word_at_rva( entry.rva,
((self.get_word_at_rva(entry.rva)<<16) + next_entry.rva +
relocation_difference & 0xffff0000) >> 16 )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_DIR64']:
# Apply the difference to the 64-bit value at the offset
# RVA=entry.rva
self.set_qword_at_rva(
entry.rva,
self.get_qword_at_rva(entry.rva) + relocation_difference)
self.OPTIONAL_HEADER.ImageBase = new_ImageBase
#correct VAs(virtual addresses) occurrences in directory information
if hasattr(self, 'IMAGE_DIRECTORY_ENTRY_IMPORT'):
for dll in self.DIRECTORY_ENTRY_IMPORT:
for func in dll.imports:
func.address += relocation_difference
if hasattr(self, 'IMAGE_DIRECTORY_ENTRY_TLS'):
self.DIRECTORY_ENTRY_TLS.struct.StartAddressOfRawData += relocation_difference
self.DIRECTORY_ENTRY_TLS.struct.EndAddressOfRawData += relocation_difference
self.DIRECTORY_ENTRY_TLS.struct.AddressOfIndex += relocation_difference
self.DIRECTORY_ENTRY_TLS.struct.AddressOfCallBacks += relocation_difference
if hasattr(self, 'IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG'):
if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.LockPrefixTable:
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.LockPrefixTable += relocation_difference
if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.EditList:
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.EditList += relocation_difference
if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SecurityCookie:
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SecurityCookie += relocation_difference
if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SEHandlerTable:
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SEHandlerTable += relocation_difference
if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.GuardCFCheckFunctionPointer:
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.GuardCFCheckFunctionPointer += relocation_difference
if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.GuardCFFunctionTable:
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.GuardCFFunctionTable += relocation_difference | Apply the relocation information to the image using the provided new image base.
This method will apply the relocation information to the image. Given the new base,
all the relocations will be processed and both the raw data and the section's data
will be fixed accordingly.
The resulting image can be retrieved as well through the method:
get_memory_mapped_image()
In order to get something that would more closely match what could be found in memory
once the Windows loader finished its work. | Below is the instruction that describes the task:
### Input:
Apply the relocation information to the image using the provided new image base.
This method will apply the relocation information to the image. Given the new base,
all the relocations will be processed and both the raw data and the section's data
will be fixed accordingly.
The resulting image can be retrieved as well through the method:
get_memory_mapped_image()
In order to get something that would more closely match what could be found in memory
once the Windows loader finished its work.
### Response:
def relocate_image(self, new_ImageBase):
"""Apply the relocation information to the image using the provided new image base.
This method will apply the relocation information to the image. Given the new base,
all the relocations will be processed and both the raw data and the section's data
will be fixed accordingly.
The resulting image can be retrieved as well through the method:
get_memory_mapped_image()
In order to get something that would more closely match what could be found in memory
once the Windows loader finished its work.
"""
relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase
if self.OPTIONAL_HEADER.DATA_DIRECTORY[5].Size:
if not hasattr(self, 'DIRECTORY_ENTRY_BASERELOC'):
self.parse_data_directories(
directories=[DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_BASERELOC']])
for reloc in self.DIRECTORY_ENTRY_BASERELOC:
virtual_address = reloc.struct.VirtualAddress
size_of_block = reloc.struct.SizeOfBlock
# We iterate with an index because if the relocation is of type
# IMAGE_REL_BASED_HIGHADJ we need to also process the next entry
# at once and skip it for the next iteration
#
entry_idx = 0
while entry_idx<len(reloc.entries):
entry = reloc.entries[entry_idx]
entry_idx += 1
if entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_ABSOLUTE']:
# Nothing to do for this type of relocation
pass
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGH']:
# Fix the high 16-bits of a relocation
#
# Add high 16-bits of relocation_difference to the
# 16-bit value at RVA=entry.rva
self.set_word_at_rva(
entry.rva,
( self.get_word_at_rva(entry.rva) + relocation_difference>>16)&0xffff )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_LOW']:
# Fix the low 16-bits of a relocation
#
# Add low 16 bits of relocation_difference to the 16-bit value
# at RVA=entry.rva
self.set_word_at_rva(
entry.rva,
( self.get_word_at_rva(entry.rva) + relocation_difference)&0xffff)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHLOW']:
# Handle all high and low parts of a 32-bit relocation
#
# Add relocation_difference to the value at RVA=entry.rva
self.set_dword_at_rva(
entry.rva,
self.get_dword_at_rva(entry.rva)+relocation_difference)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHADJ']:
# Fix the high 16-bits of a relocation and adjust
#
# Add high 16-bits of relocation_difference to the 32-bit value
# composed from the (16-bit value at RVA=entry.rva)<<16 plus
# the 16-bit value at the next relocation entry.
#
# If the next entry is beyond the array's limits,
# abort... the table is corrupt
#
if entry_idx == len(reloc.entries):
break
next_entry = reloc.entries[entry_idx]
entry_idx += 1
self.set_word_at_rva( entry.rva,
((self.get_word_at_rva(entry.rva)<<16) + next_entry.rva +
relocation_difference & 0xffff0000) >> 16 )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_DIR64']:
# Apply the difference to the 64-bit value at the offset
# RVA=entry.rva
self.set_qword_at_rva(
entry.rva,
self.get_qword_at_rva(entry.rva) + relocation_difference)
self.OPTIONAL_HEADER.ImageBase = new_ImageBase
#correct VAs(virtual addresses) occurrences in directory information
if hasattr(self, 'IMAGE_DIRECTORY_ENTRY_IMPORT'):
for dll in self.DIRECTORY_ENTRY_IMPORT:
for func in dll.imports:
func.address += relocation_difference
if hasattr(self, 'IMAGE_DIRECTORY_ENTRY_TLS'):
self.DIRECTORY_ENTRY_TLS.struct.StartAddressOfRawData += relocation_difference
self.DIRECTORY_ENTRY_TLS.struct.EndAddressOfRawData += relocation_difference
self.DIRECTORY_ENTRY_TLS.struct.AddressOfIndex += relocation_difference
self.DIRECTORY_ENTRY_TLS.struct.AddressOfCallBacks += relocation_difference
if hasattr(self, 'IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG'):
if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.LockPrefixTable:
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.LockPrefixTable += relocation_difference
if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.EditList:
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.EditList += relocation_difference
if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SecurityCookie:
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SecurityCookie += relocation_difference
if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SEHandlerTable:
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SEHandlerTable += relocation_difference
if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.GuardCFCheckFunctionPointer:
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.GuardCFCheckFunctionPointer += relocation_difference
if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.GuardCFFunctionTable:
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.GuardCFFunctionTable += relocation_difference |
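The core arithmetic behind the common IMAGE_REL_BASED_HIGHLOW case is just an offset shift by the rebase delta; a toy illustration with made-up addresses:

old_base = 0x400000
new_base = 0x500000
relocation_difference = new_base - old_base

old_value = 0x401234                                  # absolute address stored in the image
new_value = (old_value + relocation_difference) & 0xFFFFFFFF
print(hex(new_value))                                 # 0x501234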
def check(self, state, when):
"""
Checks state `state` to see if the breakpoint should fire.
:param state: The state.
:param when: Whether the check is happening before or after the event.
:return: A boolean representing whether the checkpoint should fire.
"""
ok = self.enabled and (when == self.when or self.when == BP_BOTH)
if not ok:
return ok
l.debug("... after enabled and when: %s", ok)
for a in [ _ for _ in self.kwargs if not _.endswith("_unique") ]:
current_expr = getattr(state.inspect, a)
needed = self.kwargs.get(a, None)
l.debug("... checking condition %s", a)
if current_expr is None and needed is None:
l.debug("...... both None, True")
c_ok = True
elif current_expr is not None and needed is not None:
if state.solver.solution(current_expr, needed):
l.debug("...... is_solution!")
c_ok = True
else:
l.debug("...... not solution...")
c_ok = False
if c_ok and self.kwargs.get(a+'_unique', True):
l.debug("...... checking uniqueness")
if not state.solver.unique(current_expr):
l.debug("...... not unique")
c_ok = False
else:
l.debug("...... one None, False")
c_ok = False
ok = ok and c_ok
if not ok:
return ok
l.debug("... after condition %s: %s", a, ok)
ok = ok and (self.condition is None or self.condition(state))
l.debug("... after condition func: %s", ok)
return ok | Checks state `state` to see if the breakpoint should fire.
:param state: The state.
:param when: Whether the check is happening before or after the event.
:return: A boolean representing whether the checkpoint should fire. | Below is the instruction that describes the task:
### Input:
Checks state `state` to see if the breakpoint should fire.
:param state: The state.
:param when: Whether the check is happening before or after the event.
:return: A boolean representing whether the checkpoint should fire.
### Response:
def check(self, state, when):
"""
Checks state `state` to see if the breakpoint should fire.
:param state: The state.
:param when: Whether the check is happening before or after the event.
:return: A boolean representing whether the checkpoint should fire.
"""
ok = self.enabled and (when == self.when or self.when == BP_BOTH)
if not ok:
return ok
l.debug("... after enabled and when: %s", ok)
for a in [ _ for _ in self.kwargs if not _.endswith("_unique") ]:
current_expr = getattr(state.inspect, a)
needed = self.kwargs.get(a, None)
l.debug("... checking condition %s", a)
if current_expr is None and needed is None:
l.debug("...... both None, True")
c_ok = True
elif current_expr is not None and needed is not None:
if state.solver.solution(current_expr, needed):
l.debug("...... is_solution!")
c_ok = True
else:
l.debug("...... not solution...")
c_ok = False
if c_ok and self.kwargs.get(a+'_unique', True):
l.debug("...... checking uniqueness")
if not state.solver.unique(current_expr):
l.debug("...... not unique")
c_ok = False
else:
l.debug("...... one None, False")
c_ok = False
ok = ok and c_ok
if not ok:
return ok
l.debug("... after condition %s: %s", a, ok)
ok = ok and (self.condition is None or self.condition(state))
l.debug("... after condition func: %s", ok)
return ok |
def _search(self, search_state, include_current_position=False, count=1):
"""
Execute search. Return (working_index, cursor_position) tuple when this
search is applied. Returns `None` when this text cannot be found.
"""
assert isinstance(search_state, SearchState)
assert isinstance(count, int) and count > 0
text = search_state.text
direction = search_state.direction
ignore_case = search_state.ignore_case()
def search_once(working_index, document):
"""
Do search one time.
Return (working_index, document) or `None`
"""
if direction == IncrementalSearchDirection.FORWARD:
# Try find at the current input.
new_index = document.find(
text, include_current_position=include_current_position,
ignore_case=ignore_case)
if new_index is not None:
return (working_index,
Document(document.text, document.cursor_position + new_index))
else:
# No match, go forward in the history. (Include len+1 to wrap around.)
# (Here we should always include all cursor positions, because
# it's a different line.)
for i in range(working_index + 1, len(self._working_lines) + 1):
i %= len(self._working_lines)
document = Document(self._working_lines[i], 0)
new_index = document.find(text, include_current_position=True,
ignore_case=ignore_case)
if new_index is not None:
return (i, Document(document.text, new_index))
else:
# Try find at the current input.
new_index = document.find_backwards(
text, ignore_case=ignore_case)
if new_index is not None:
return (working_index,
Document(document.text, document.cursor_position + new_index))
else:
# No match, go back in the history. (Include -1 to wrap around.)
for i in range(working_index - 1, -2, -1):
i %= len(self._working_lines)
document = Document(self._working_lines[i], len(self._working_lines[i]))
new_index = document.find_backwards(
text, ignore_case=ignore_case)
if new_index is not None:
return (i, Document(document.text, len(document.text) + new_index))
# Do 'count' search iterations.
working_index = self.working_index
document = self.document
for _ in range(count):
result = search_once(working_index, document)
if result is None:
return # Nothing found.
else:
working_index, document = result
return (working_index, document.cursor_position) | Execute search. Return (working_index, cursor_position) tuple when this
search is applied. Returns `None` when this text cannot be found. | Below is the instruction that describes the task:
### Input:
Execute search. Return (working_index, cursor_position) tuple when this
search is applied. Returns `None` when this text cannot be found.
### Response:
def _search(self, search_state, include_current_position=False, count=1):
"""
Execute search. Return (working_index, cursor_position) tuple when this
search is applied. Returns `None` when this text cannot be found.
"""
assert isinstance(search_state, SearchState)
assert isinstance(count, int) and count > 0
text = search_state.text
direction = search_state.direction
ignore_case = search_state.ignore_case()
def search_once(working_index, document):
"""
Do search one time.
Return (working_index, document) or `None`
"""
if direction == IncrementalSearchDirection.FORWARD:
# Try find at the current input.
new_index = document.find(
text, include_current_position=include_current_position,
ignore_case=ignore_case)
if new_index is not None:
return (working_index,
Document(document.text, document.cursor_position + new_index))
else:
# No match, go forward in the history. (Include len+1 to wrap around.)
# (Here we should always include all cursor positions, because
# it's a different line.)
for i in range(working_index + 1, len(self._working_lines) + 1):
i %= len(self._working_lines)
document = Document(self._working_lines[i], 0)
new_index = document.find(text, include_current_position=True,
ignore_case=ignore_case)
if new_index is not None:
return (i, Document(document.text, new_index))
else:
# Try find at the current input.
new_index = document.find_backwards(
text, ignore_case=ignore_case)
if new_index is not None:
return (working_index,
Document(document.text, document.cursor_position + new_index))
else:
# No match, go back in the history. (Include -1 to wrap around.)
for i in range(working_index - 1, -2, -1):
i %= len(self._working_lines)
document = Document(self._working_lines[i], len(self._working_lines[i]))
new_index = document.find_backwards(
text, ignore_case=ignore_case)
if new_index is not None:
return (i, Document(document.text, len(document.text) + new_index))
# Do 'count' search iterations.
working_index = self.working_index
document = self.document
for _ in range(count):
result = search_once(working_index, document)
if result is None:
return # Nothing found.
else:
working_index, document = result
return (working_index, document.cursor_position) |
def WaitForTasks(tasks,
raiseOnError=True,
si=None,
pc=None,
onProgressUpdate=None,
results=None):
"""
Wait for multiple tasks to complete. Much faster than calling WaitForTask
N times
"""
if not tasks:
return
if si is None:
si = vim.ServiceInstance("ServiceInstance", tasks[0]._stub)
if pc is None:
pc = si.content.propertyCollector
if results is None:
results = []
progressUpdaters = {}
for task in tasks:
progressUpdater = ProgressUpdater(task, onProgressUpdate)
progressUpdater.Update('created')
progressUpdaters[str(task)] = progressUpdater
filter = CreateTasksFilter(pc, tasks)
try:
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while len(progressUpdaters):
update = pc.WaitForUpdates(version)
for filterSet in update.filterSet:
for objSet in filterSet.objectSet:
task = objSet.obj
taskId = str(task)
for change in objSet.changeSet:
if change.name == 'info':
state = change.val.state
elif change.name == 'info.state':
state = change.val
else:
continue
progressUpdater = progressUpdaters.get(taskId)
if not progressUpdater:
continue
if state == vim.TaskInfo.State.success:
progressUpdater.Update('completed')
progressUpdaters.pop(taskId)
# cache the results, as task objects could expire if one
# of the tasks take a longer time to complete
results.append(task.info.result)
elif state == vim.TaskInfo.State.error:
err = task.info.error
progressUpdater.Update('error: %s' % str(err))
if raiseOnError:
raise err
else:
print("Task %s reported error: %s" % (taskId, str(err)))
progressUpdaters.pop(taskId)
else:
if onProgressUpdate:
progressUpdater.UpdateIfNeeded()
# Move to next version
version = update.version
finally:
if filter:
filter.Destroy()
return | Wait for multiple tasks to complete. Much faster than calling WaitForTask
N times | Below is the instruction that describes the task:
### Input:
Wait for multiple tasks to complete. Much faster than calling WaitForTask
N times
### Response:
def WaitForTasks(tasks,
raiseOnError=True,
si=None,
pc=None,
onProgressUpdate=None,
results=None):
"""
Wait for multiple tasks to complete. Much faster than calling WaitForTask
N times
"""
if not tasks:
return
if si is None:
si = vim.ServiceInstance("ServiceInstance", tasks[0]._stub)
if pc is None:
pc = si.content.propertyCollector
if results is None:
results = []
progressUpdaters = {}
for task in tasks:
progressUpdater = ProgressUpdater(task, onProgressUpdate)
progressUpdater.Update('created')
progressUpdaters[str(task)] = progressUpdater
filter = CreateTasksFilter(pc, tasks)
try:
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while len(progressUpdaters):
update = pc.WaitForUpdates(version)
for filterSet in update.filterSet:
for objSet in filterSet.objectSet:
task = objSet.obj
taskId = str(task)
for change in objSet.changeSet:
if change.name == 'info':
state = change.val.state
elif change.name == 'info.state':
state = change.val
else:
continue
progressUpdater = progressUpdaters.get(taskId)
if not progressUpdater:
continue
if state == vim.TaskInfo.State.success:
progressUpdater.Update('completed')
progressUpdaters.pop(taskId)
# cache the results, as task objects could expire if one
# of the tasks take a longer time to complete
results.append(task.info.result)
elif state == vim.TaskInfo.State.error:
err = task.info.error
progressUpdater.Update('error: %s' % str(err))
if raiseOnError:
raise err
else:
print("Task %s reported error: %s" % (taskId, str(err)))
progressUpdaters.pop(taskId)
else:
if onProgressUpdate:
progressUpdater.UpdateIfNeeded()
# Move to next version
version = update.version
finally:
if filter:
filter.Destroy()
return |
def handle(request, message=None, redirect=None, ignore=False,
escalate=False, log_level=None, force_log=None):
"""Centralized error handling for Horizon.
Because Horizon consumes so many different APIs with completely
different ``Exception`` types, it's necessary to have a centralized
place for handling exceptions which may be raised.
Exceptions are roughly divided into 3 types:
#. ``UNAUTHORIZED``: Errors resulting from authentication or authorization
problems. These result in being logged out and sent to the login screen.
#. ``NOT_FOUND``: Errors resulting from objects which could not be
located via the API. These generally result in a user-facing error
message, but are otherwise returned to the normal code flow. Optionally
a redirect value may be passed to the error handler so users are
returned to a different view than the one requested in addition to the
error message.
#. ``RECOVERABLE``: Generic API errors which generate a user-facing message
but drop directly back to the regular code flow.
All other exceptions bubble the stack as normal unless the ``ignore``
argument is passed in as ``True``, in which case only unrecognized
errors are bubbled.
If the exception is not re-raised, an appropriate wrapper exception
class indicating the type of exception that was encountered will be
returned.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
log_method = getattr(LOG, log_level or "exception")
force_log = force_log or os.environ.get("HORIZON_TEST_RUN", False)
force_silence = getattr(exc_value, "silence_logging", False)
# Because the same exception may travel through this method more than
# once (if it's re-raised) we may want to treat it differently
# the second time (e.g. no user messages/logging).
handled = issubclass(exc_type, HandledException)
wrap = False
# Restore our original exception information, but re-wrap it at the end
if handled:
exc_type, exc_value, exc_traceback = exc_value.wrapped
wrap = True
log_entry = encoding.force_text(exc_value)
user_message = ""
# We trust messages from our own exceptions
if issubclass(exc_type, HorizonException):
user_message = log_entry
# If the message has a placeholder for the exception, fill it in
elif message and "%(exc)s" in message:
user_message = encoding.force_text(message) % {"exc": log_entry}
elif message:
user_message = encoding.force_text(message)
for exc_handler in HANDLE_EXC_METHODS:
if issubclass(exc_type, exc_handler['exc']):
if exc_handler['set_wrap']:
wrap = True
handler = exc_handler['handler']
ret = handler(request, user_message, redirect, ignore,
exc_handler.get('escalate', escalate),
handled, force_silence, force_log,
log_method, log_entry, log_level)
if ret:
return ret # return to normal code flow
# If we've gotten here, time to wrap and/or raise our exception.
if wrap:
raise HandledException([exc_type, exc_value, exc_traceback])
# assume exceptions handled in the code that pass in a message are already
# handled appropriately and treat as recoverable
if message:
ret = handle_recoverable(request, user_message, redirect, ignore,
escalate, handled, force_silence, force_log,
log_method, log_entry, log_level)
# pylint: disable=using-constant-test
if ret:
return ret
six.reraise(exc_type, exc_value, exc_traceback) | Centralized error handling for Horizon.
Because Horizon consumes so many different APIs with completely
different ``Exception`` types, it's necessary to have a centralized
place for handling exceptions which may be raised.
Exceptions are roughly divided into 3 types:
#. ``UNAUTHORIZED``: Errors resulting from authentication or authorization
problems. These result in being logged out and sent to the login screen.
#. ``NOT_FOUND``: Errors resulting from objects which could not be
located via the API. These generally result in a user-facing error
message, but are otherwise returned to the normal code flow. Optionally
a redirect value may be passed to the error handler so users are
returned to a different view than the one requested in addition to the
error message.
#. ``RECOVERABLE``: Generic API errors which generate a user-facing message
but drop directly back to the regular code flow.
All other exceptions bubble the stack as normal unless the ``ignore``
argument is passed in as ``True``, in which case only unrecognized
errors are bubbled.
If the exception is not re-raised, an appropriate wrapper exception
class indicating the type of exception that was encountered will be
returned. | Below is the instruction that describes the task:
### Input:
Centralized error handling for Horizon.
Because Horizon consumes so many different APIs with completely
different ``Exception`` types, it's necessary to have a centralized
place for handling exceptions which may be raised.
Exceptions are roughly divided into 3 types:
#. ``UNAUTHORIZED``: Errors resulting from authentication or authorization
problems. These result in being logged out and sent to the login screen.
#. ``NOT_FOUND``: Errors resulting from objects which could not be
located via the API. These generally result in a user-facing error
message, but are otherwise returned to the normal code flow. Optionally
a redirect value may be passed to the error handler so users are
returned to a different view than the one requested in addition to the
error message.
#. ``RECOVERABLE``: Generic API errors which generate a user-facing message
but drop directly back to the regular code flow.
All other exceptions bubble the stack as normal unless the ``ignore``
argument is passed in as ``True``, in which case only unrecognized
errors are bubbled.
If the exception is not re-raised, an appropriate wrapper exception
class indicating the type of exception that was encountered will be
returned.
### Response:
def handle(request, message=None, redirect=None, ignore=False,
escalate=False, log_level=None, force_log=None):
"""Centralized error handling for Horizon.
Because Horizon consumes so many different APIs with completely
different ``Exception`` types, it's necessary to have a centralized
place for handling exceptions which may be raised.
Exceptions are roughly divided into 3 types:
#. ``UNAUTHORIZED``: Errors resulting from authentication or authorization
problems. These result in being logged out and sent to the login screen.
#. ``NOT_FOUND``: Errors resulting from objects which could not be
located via the API. These generally result in a user-facing error
message, but are otherwise returned to the normal code flow. Optionally
a redirect value may be passed to the error handler so users are
returned to a different view than the one requested in addition to the
error message.
#. ``RECOVERABLE``: Generic API errors which generate a user-facing message
but drop directly back to the regular code flow.
All other exceptions bubble the stack as normal unless the ``ignore``
argument is passed in as ``True``, in which case only unrecognized
errors are bubbled.
If the exception is not re-raised, an appropriate wrapper exception
class indicating the type of exception that was encountered will be
returned.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
log_method = getattr(LOG, log_level or "exception")
force_log = force_log or os.environ.get("HORIZON_TEST_RUN", False)
force_silence = getattr(exc_value, "silence_logging", False)
# Because the same exception may travel through this method more than
# once (if it's re-raised) we may want to treat it differently
# the second time (e.g. no user messages/logging).
handled = issubclass(exc_type, HandledException)
wrap = False
# Restore our original exception information, but re-wrap it at the end
if handled:
exc_type, exc_value, exc_traceback = exc_value.wrapped
wrap = True
log_entry = encoding.force_text(exc_value)
user_message = ""
# We trust messages from our own exceptions
if issubclass(exc_type, HorizonException):
user_message = log_entry
# If the message has a placeholder for the exception, fill it in
elif message and "%(exc)s" in message:
user_message = encoding.force_text(message) % {"exc": log_entry}
elif message:
user_message = encoding.force_text(message)
for exc_handler in HANDLE_EXC_METHODS:
if issubclass(exc_type, exc_handler['exc']):
if exc_handler['set_wrap']:
wrap = True
handler = exc_handler['handler']
ret = handler(request, user_message, redirect, ignore,
exc_handler.get('escalate', escalate),
handled, force_silence, force_log,
log_method, log_entry, log_level)
if ret:
return ret # return to normal code flow
# If we've gotten here, time to wrap and/or raise our exception.
if wrap:
raise HandledException([exc_type, exc_value, exc_traceback])
# assume exceptions handled in the code that pass in a message are already
# handled appropriately and treat as recoverable
if message:
ret = handle_recoverable(request, user_message, redirect, ignore,
escalate, handled, force_silence, force_log,
log_method, log_entry, log_level)
# pylint: disable=using-constant-test
if ret:
return ret
six.reraise(exc_type, exc_value, exc_traceback) |
def _build_url(self):
"""Build url based on searching by date or by show."""
url_params = [
BASE_URL, self.category + ' ratings', self.day, self.year, self.month
]
return SEARCH_URL.format(*url_params) | Build url based on searching by date or by show. | Below is the the instruction that describes the task:
### Input:
Build url based on searching by date or by show.
### Response:
def _build_url(self):
"""Build url based on searching by date or by show."""
url_params = [
BASE_URL, self.category + ' ratings', self.day, self.year, self.month
]
return SEARCH_URL.format(*url_params) |
def sort(self, ids):
"""
Sort the given list of identifiers,
returning a new (sorted) list.
:param list ids: the list of identifiers to be sorted
:rtype: list
"""
def extract_int(string):
"""
Extract an integer from the given string.
:param string string: the identifier string
:rtype: int
"""
return int(re.sub(r"[^0-9]", "", string))
tmp = list(ids)
if self.algorithm == IDSortingAlgorithm.UNSORTED:
self.log(u"Sorting using UNSORTED")
elif self.algorithm == IDSortingAlgorithm.LEXICOGRAPHIC:
self.log(u"Sorting using LEXICOGRAPHIC")
tmp = sorted(ids)
elif self.algorithm == IDSortingAlgorithm.NUMERIC:
self.log(u"Sorting using NUMERIC")
tmp = ids
try:
tmp = sorted(tmp, key=extract_int)
except (ValueError, TypeError) as exc:
self.log_exc(u"Not all id values contain a numeric part. Returning the id list unchanged.", exc, False, None)
return tmp | Sort the given list of identifiers,
returning a new (sorted) list.
:param list ids: the list of identifiers to be sorted
:rtype: list | Below is the the instruction that describes the task:
### Input:
Sort the given list of identifiers,
returning a new (sorted) list.
:param list ids: the list of identifiers to be sorted
:rtype: list
### Response:
def sort(self, ids):
"""
Sort the given list of identifiers,
returning a new (sorted) list.
:param list ids: the list of identifiers to be sorted
:rtype: list
"""
def extract_int(string):
"""
Extract an integer from the given string.
:param string string: the identifier string
:rtype: int
"""
return int(re.sub(r"[^0-9]", "", string))
tmp = list(ids)
if self.algorithm == IDSortingAlgorithm.UNSORTED:
self.log(u"Sorting using UNSORTED")
elif self.algorithm == IDSortingAlgorithm.LEXICOGRAPHIC:
self.log(u"Sorting using LEXICOGRAPHIC")
tmp = sorted(ids)
elif self.algorithm == IDSortingAlgorithm.NUMERIC:
self.log(u"Sorting using NUMERIC")
tmp = ids
try:
tmp = sorted(tmp, key=extract_int)
except (ValueError, TypeError) as exc:
self.log_exc(u"Not all id values contain a numeric part. Returning the id list unchanged.", exc, False, None)
return tmp |
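For illustration, the NUMERIC branch above amounts to sorting on the digits embedded in each identifier; a standalone sketch with made-up identifiers:
import re
ids = ["f10", "f2", "f1"]
print(sorted(ids))                                               # lexicographic: ['f1', 'f10', 'f2']
print(sorted(ids, key=lambda s: int(re.sub(r"[^0-9]", "", s))))  # numeric: ['f1', 'f2', 'f10']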
def find_in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
"""
Get the indices of all points in a fractional coord list that are
equal to a fractional coord (with a tolerance), taking into account
periodic boundary conditions.
Args:
fcoord_list: List of fractional coords
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
"""
if len(fcoord_list) == 0:
return []
fcoords = np.tile(fcoord, (len(fcoord_list), 1))
fdist = fcoord_list - fcoords
fdist -= np.round(fdist)
return np.where(np.all(np.abs(fdist) < atol, axis=1))[0] | Get the indices of all points in a fractional coord list that are
equal to a fractional coord (with a tolerance), taking into account
periodic boundary conditions.
Args:
fcoord_list: List of fractional coords
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found. | Below is the the instruction that describes the task:
### Input:
Get the indices of all points in a fractional coord list that are
equal to a fractional coord (with a tolerance), taking into account
periodic boundary conditions.
Args:
fcoord_list: List of fractional coords
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
### Response:
def find_in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
"""
Get the indices of all points in a fractional coord list that are
equal to a fractional coord (with a tolerance), taking into account
periodic boundary conditions.
Args:
fcoord_list: List of fractional coords
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
"""
if len(fcoord_list) == 0:
return []
fcoords = np.tile(fcoord, (len(fcoord_list), 1))
fdist = fcoord_list - fcoords
fdist -= np.round(fdist)
return np.where(np.all(np.abs(fdist) < atol, axis=1))[0] |
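A small usage sketch of the periodic matching above (coordinates are illustrative); note that 0.999 and 0.001 match across the cell boundary:
import numpy as np
fcoord_list = np.array([[0.001, 0.50, 0.25],
                        [0.999, 0.50, 0.25],
                        [0.250, 0.25, 0.25]])
# 0.999 is equivalent to 0.001 under periodic boundary conditions,
# so the first two rows both match within the loose tolerance used here
print(find_in_coord_list_pbc(fcoord_list, [1.0, 0.5, 0.25], atol=1e-2))  # -> [0 1]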
def _split_span(self, span, index, length=0):
"""Split a span into two or three separate spans at certain indices."""
offset = span[1] + index if index < 0 else span[0] + index
# log.debug([(span[0], offset), (offset, offset + length), (offset + length, span[1])])
return [(span[0], offset), (offset, offset + length), (offset + length, span[1])] | Split a span into two or three separate spans at certain indices. | Below is the the instruction that describes the task:
### Input:
Split a span into two or three separate spans at certain indices.
### Response:
def _split_span(self, span, index, length=0):
"""Split a span into two or three separate spans at certain indices."""
offset = span[1] + index if index < 0 else span[0] + index
# log.debug([(span[0], offset), (offset, offset + length), (offset + length, span[1])])
return [(span[0], offset), (offset, offset + length), (offset + length, span[1])] |
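A worked example of the split arithmetic above with hypothetical numbers (shown standalone, since the method only uses its arguments):
span, index, length = (5, 20), 3, 4
offset = span[1] + index if index < 0 else span[0] + index   # 5 + 3 = 8
print([(span[0], offset), (offset, offset + length), (offset + length, span[1])])
# -> [(5, 8), (8, 12), (12, 20)]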
def get_window_location(self, window):
"""
Get a window's location.
"""
screen_ret = Screen()
x_ret = ctypes.c_int(0)
y_ret = ctypes.c_int(0)
_libxdo.xdo_get_window_location(
self._xdo, window, ctypes.byref(x_ret), ctypes.byref(y_ret),
ctypes.byref(screen_ret))
return window_location(x_ret.value, y_ret.value, screen_ret) | Get a window's location. | Below is the the instruction that describes the task:
### Input:
Get a window's location.
### Response:
def get_window_location(self, window):
"""
Get a window's location.
"""
screen_ret = Screen()
x_ret = ctypes.c_int(0)
y_ret = ctypes.c_int(0)
_libxdo.xdo_get_window_location(
self._xdo, window, ctypes.byref(x_ret), ctypes.byref(y_ret),
ctypes.byref(screen_ret))
return window_location(x_ret.value, y_ret.value, screen_ret) |
def get_write_fields(self):
"""
Get the list of fields used to write the header, separating
record and signal specification fields. Returns the default
required fields, the user defined fields,
and their dependencies.
Does NOT include `d_signal` or `e_d_signal`.
Returns
-------
rec_write_fields : list
Record specification fields to be written. Includes
'comment' if present.
sig_write_fields : dict
Dictionary of signal specification fields to be written,
with values equal to the channels that need to be present
for each field.
"""
# Record specification fields
rec_write_fields = self.get_write_subset('record')
# Add comments if any
if self.comments is not None:
rec_write_fields.append('comments')
# Get required signal fields if signals are present.
self.check_field('n_sig')
if self.n_sig > 0:
sig_write_fields = self.get_write_subset('signal')
else:
sig_write_fields = None
return rec_write_fields, sig_write_fields | Get the list of fields used to write the header, separating
record and signal specification fields. Returns the default
required fields, the user defined fields,
and their dependencies.
Does NOT include `d_signal` or `e_d_signal`.
Returns
-------
rec_write_fields : list
Record specification fields to be written. Includes
'comment' if present.
sig_write_fields : dict
Dictionary of signal specification fields to be written,
with values equal to the channels that need to be present
for each field. | Below is the the instruction that describes the task:
### Input:
Get the list of fields used to write the header, separating
record and signal specification fields. Returns the default
required fields, the user defined fields,
and their dependencies.
Does NOT include `d_signal` or `e_d_signal`.
Returns
-------
rec_write_fields : list
Record specification fields to be written. Includes
'comment' if present.
sig_write_fields : dict
Dictionary of signal specification fields to be written,
with values equal to the channels that need to be present
for each field.
### Response:
def get_write_fields(self):
"""
Get the list of fields used to write the header, separating
record and signal specification fields. Returns the default
required fields, the user defined fields,
and their dependencies.
Does NOT include `d_signal` or `e_d_signal`.
Returns
-------
rec_write_fields : list
Record specification fields to be written. Includes
'comment' if present.
sig_write_fields : dict
Dictionary of signal specification fields to be written,
with values equal to the channels that need to be present
for each field.
"""
# Record specification fields
rec_write_fields = self.get_write_subset('record')
# Add comments if any
if self.comments is not None:
rec_write_fields.append('comments')
# Get required signal fields if signals are present.
self.check_field('n_sig')
if self.n_sig > 0:
sig_write_fields = self.get_write_subset('signal')
else:
sig_write_fields = None
return rec_write_fields, sig_write_fields |
def get_compositions(self):
"""Gets the composition list resulting from a search.
return: (osid.repository.CompositionList) - the composition list
raise: IllegalState - the list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.CompositionList(self._results, runtime=self._runtime) | Gets the composition list resulting from a search.
return: (osid.repository.CompositionList) - the composition list
raise: IllegalState - the list has already been retrieved
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the composition list resulting from a search.
return: (osid.repository.CompositionList) - the composition list
raise: IllegalState - the list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_compositions(self):
"""Gets the composition list resulting from a search.
return: (osid.repository.CompositionList) - the composition list
raise: IllegalState - the list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.CompositionList(self._results, runtime=self._runtime) |
def release(self):
"""Try to delete the lock files. Doesn't matter if we fail"""
if self.lock_filename != self.pid_filename:
try:
os.unlink(self.lock_filename)
except OSError:
pass
try:
os.remove(self.pid_filename)
except OSError:
pass | Try to delete the lock files. Doesn't matter if we fail | Below is the the instruction that describes the task:
### Input:
Try to delete the lock files. Doesn't matter if we fail
### Response:
def release(self):
"""Try to delete the lock files. Doesn't matter if we fail"""
if self.lock_filename != self.pid_filename:
try:
os.unlink(self.lock_filename)
except OSError:
pass
try:
os.remove(self.pid_filename)
except OSError:
pass |
def write(self, data, assert_ss=True, deassert_ss=True):
"""Half-duplex SPI write. If assert_ss is True, the SS line will be
asserted low, the specified bytes will be clocked out the MOSI line, and
if deassert_ss is True the SS line be put back high.
"""
# Fail if MOSI is not specified.
if self._mosi is None:
raise RuntimeError('Write attempted with no MOSI pin specified.')
if assert_ss and self._ss is not None:
self._gpio.set_low(self._ss)
for byte in data:
for i in range(8):
# Write bit to MOSI.
if self._write_shift(byte, i) & self._mask:
self._gpio.set_high(self._mosi)
else:
self._gpio.set_low(self._mosi)
# Flip clock off base.
self._gpio.output(self._sclk, not self._clock_base)
# Return clock to base.
self._gpio.output(self._sclk, self._clock_base)
if deassert_ss and self._ss is not None:
self._gpio.set_high(self._ss) | Half-duplex SPI write. If assert_ss is True, the SS line will be
asserted low, the specified bytes will be clocked out the MOSI line, and
if deassert_ss is True the SS line be put back high. | Below is the the instruction that describes the task:
### Input:
Half-duplex SPI write. If assert_ss is True, the SS line will be
asserted low, the specified bytes will be clocked out the MOSI line, and
if deassert_ss is True the SS line be put back high.
### Response:
def write(self, data, assert_ss=True, deassert_ss=True):
"""Half-duplex SPI write. If assert_ss is True, the SS line will be
asserted low, the specified bytes will be clocked out the MOSI line, and
if deassert_ss is True the SS line be put back high.
"""
# Fail if MOSI is not specified.
if self._mosi is None:
raise RuntimeError('Write attempted with no MOSI pin specified.')
if assert_ss and self._ss is not None:
self._gpio.set_low(self._ss)
for byte in data:
for i in range(8):
# Write bit to MOSI.
if self._write_shift(byte, i) & self._mask:
self._gpio.set_high(self._mosi)
else:
self._gpio.set_low(self._mosi)
# Flip clock off base.
self._gpio.output(self._sclk, not self._clock_base)
# Return clock to base.
self._gpio.output(self._sclk, self._clock_base)
if deassert_ss and self._ss is not None:
self._gpio.set_high(self._ss) |
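For orientation, the inner loop above drives MOSI one bit at a time; a stripped-down, MSB-first illustration of the bit selection (the MSB-first order is an assumption for this sketch, the class's `_write_shift`/`_mask` pair generalises the bit order):
byte = 0xA5                              # 0b10100101
for i in range(8):
    bit_set = bool((byte << i) & 0x80)   # examine bit 7, then bit 6, ...
    print(i, int(bit_set))               # prints 1 0 1 0 0 1 0 1 across the 8 iterations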
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif l[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, l[0])
except AttributeError:
print("Skipping {}".format("/".join(name)))
continue
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model | Load tf checkpoints in a pytorch model | Below is the the instruction that describes the task:
### Input:
Load tf checkpoints in a pytorch model
### Response:
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif l[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, l[0])
except AttributeError:
print("Skipping {}".format("/".join(name)))
continue
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model |
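The variable-name parsing above hinges on one regex split; a standalone illustration of what it produces:
import re
m_name = "layer_11"
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
    parts = re.split(r'_(\d+)', m_name)
print(parts)   # -> ['layer', '11', ''] ; parts[0] picks the attribute, parts[1] the layer index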
def delayed_burst_run(self, target_cycles_per_sec):
""" Run CPU not faster than given speedlimit """
old_cycles = self.cycles
start_time = time.time()
self.burst_run()
is_duration = time.time() - start_time
new_cycles = self.cycles - old_cycles
try:
is_cycles_per_sec = new_cycles / is_duration
except ZeroDivisionError:
pass
else:
should_burst_duration = is_cycles_per_sec / target_cycles_per_sec
target_duration = should_burst_duration * is_duration
delay = target_duration - is_duration
if delay > 0:
if delay > self.max_delay:
self.delay = self.max_delay
else:
self.delay = delay
time.sleep(self.delay)
self.call_sync_callbacks() | Run CPU not faster than given speedlimit | Below is the the instruction that describes the task:
### Input:
Run CPU not faster than given speedlimit
### Response:
def delayed_burst_run(self, target_cycles_per_sec):
""" Run CPU not faster than given speedlimit """
old_cycles = self.cycles
start_time = time.time()
self.burst_run()
is_duration = time.time() - start_time
new_cycles = self.cycles - old_cycles
try:
is_cycles_per_sec = new_cycles / is_duration
except ZeroDivisionError:
pass
else:
should_burst_duration = is_cycles_per_sec / target_cycles_per_sec
target_duration = should_burst_duration * is_duration
delay = target_duration - is_duration
if delay > 0:
if delay > self.max_delay:
self.delay = self.max_delay
else:
self.delay = delay
time.sleep(self.delay)
self.call_sync_callbacks() |
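The throttling arithmetic above, traced with made-up numbers:
target_cycles_per_sec = 1000000
new_cycles = 600000                 # cycles executed in the burst
is_duration = 0.5                   # seconds the burst actually took
is_cycles_per_sec = new_cycles / is_duration                          # 1.2e6: running too fast
should_burst_duration = is_cycles_per_sec / target_cycles_per_sec     # 1.2
delay = should_burst_duration * is_duration - is_duration             # ~0.1 s to sleep
print(round(delay, 3))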
def to_tsv():
""" Save all regular expressions to a tsv file so they can be more easily copy/pasted in Sublime """
with open(os.path.join(DATA_PATH, 'regexes.tsv'), mode='wt') as fout:
vars = copy.copy(tuple(globals().items()))
for k, v in vars:
if k.lower().startswith('cre_'):
fout.write(k[4:] + '\t' + v.pattern + '\n')
elif k.lower().startswith('re_'):
fout.write(k[3:] + '\t' + v.pattern + '\n') | Save all regular expressions to a tsv file so they can be more easily copy/pasted in Sublime | Below is the the instruction that describes the task:
### Input:
Save all regular expressions to a tsv file so they can be more easily copy/pasted in Sublime
### Response:
def to_tsv():
""" Save all regular expressions to a tsv file so they can be more easily copy/pasted in Sublime """
with open(os.path.join(DATA_PATH, 'regexes.tsv'), mode='wt') as fout:
vars = copy.copy(tuple(globals().items()))
for k, v in vars:
if k.lower().startswith('cre_'):
fout.write(k[4:] + '\t' + v.pattern + '\n')
elif k.lower().startswith('re_'):
fout.write(k[3:] + '\t' + v.pattern + '\n') |
def build_graph(self):
"""
Read lazy dependency list and build graph.
"""
for child, parents in self.dependencies.items():
if child not in self.nodes:
raise NodeNotFoundError(
"App %s SQL item dependencies reference nonexistent child node %r" % (
child[0], child),
child
)
for parent in parents:
if parent not in self.nodes:
raise NodeNotFoundError(
"App %s SQL item dependencies reference nonexistent parent node %r" % (
child[0], parent),
parent
)
self.node_map[child].add_parent(self.node_map[parent])
self.node_map[parent].add_child(self.node_map[child])
for node in self.nodes:
self.ensure_not_cyclic(node,
lambda x: (parent.key for parent in self.node_map[x].parents)) | Read lazy dependency list and build graph. | Below is the the instruction that describes the task:
### Input:
Read lazy dependency list and build graph.
### Response:
def build_graph(self):
"""
Read lazy dependency list and build graph.
"""
for child, parents in self.dependencies.items():
if child not in self.nodes:
raise NodeNotFoundError(
"App %s SQL item dependencies reference nonexistent child node %r" % (
child[0], child),
child
)
for parent in parents:
if parent not in self.nodes:
raise NodeNotFoundError(
"App %s SQL item dependencies reference nonexistent parent node %r" % (
child[0], parent),
parent
)
self.node_map[child].add_parent(self.node_map[parent])
self.node_map[parent].add_child(self.node_map[child])
for node in self.nodes:
self.ensure_not_cyclic(node,
lambda x: (parent.key for parent in self.node_map[x].parents)) |
def getMac256Hash(challenge, appId="[email protected]", key="Q1P7W2E4J9R8U3S5"):
"""
Generate the lock-and-key response, needed to acquire registration tokens.
"""
clearText = challenge + appId
clearText += "0" * (8 - len(clearText) % 8)
def int32ToHexString(n):
hexChars = "0123456789abcdef"
hexString = ""
for i in range(4):
hexString += hexChars[(n >> (i * 8 + 4)) & 15]
hexString += hexChars[(n >> (i * 8)) & 15]
return hexString
def int64Xor(a, b):
sA = "{0:b}".format(a)
sB = "{0:b}".format(b)
sC = ""
sD = ""
diff = abs(len(sA) - len(sB))
for i in range(diff):
sD += "0"
if len(sA) < len(sB):
sD += sA
sA = sD
elif len(sB) < len(sA):
sD += sB
sB = sD
for i in range(len(sA)):
sC += "0" if sA[i] == sB[i] else "1"
return int(sC, 2)
def cS64(pdwData, pInHash):
MODULUS = 2147483647
CS64_a = pInHash[0] & MODULUS
CS64_b = pInHash[1] & MODULUS
CS64_c = pInHash[2] & MODULUS
CS64_d = pInHash[3] & MODULUS
CS64_e = 242854337
pos = 0
qwDatum = 0
qwMAC = 0
qwSum = 0
for i in range(len(pdwData) // 2):
qwDatum = int(pdwData[pos])
pos += 1
qwDatum *= CS64_e
qwDatum = qwDatum % MODULUS
qwMAC += qwDatum
qwMAC *= CS64_a
qwMAC += CS64_b
qwMAC = qwMAC % MODULUS
qwSum += qwMAC
qwMAC += int(pdwData[pos])
pos += 1
qwMAC *= CS64_c
qwMAC += CS64_d
qwMAC = qwMAC % MODULUS
qwSum += qwMAC
qwMAC += CS64_b
qwMAC = qwMAC % MODULUS
qwSum += CS64_d
qwSum = qwSum % MODULUS
return [qwMAC, qwSum]
cchClearText = len(clearText) // 4
pClearText = []
for i in range(cchClearText):
pClearText = pClearText[:i] + [0] + pClearText[i:]
for pos in range(4):
pClearText[i] += ord(clearText[4 * i + pos]) * (256 ** pos)
sha256Hash = [0, 0, 0, 0]
hash = hashlib.sha256((challenge + key).encode("utf-8")).hexdigest().upper()
for i in range(len(sha256Hash)):
sha256Hash[i] = 0
for pos in range(4):
dpos = 8 * i + pos * 2
sha256Hash[i] += int(hash[dpos:dpos + 2], 16) * (256 ** pos)
macHash = cS64(pClearText, sha256Hash)
macParts = [macHash[0], macHash[1], macHash[0], macHash[1]]
return "".join(map(int32ToHexString, map(int64Xor, sha256Hash, macParts))) | Generate the lock-and-key response, needed to acquire registration tokens. | Below is the the instruction that describes the task:
### Input:
Generate the lock-and-key response, needed to acquire registration tokens.
### Response:
def getMac256Hash(challenge, appId="[email protected]", key="Q1P7W2E4J9R8U3S5"):
"""
Generate the lock-and-key response, needed to acquire registration tokens.
"""
clearText = challenge + appId
clearText += "0" * (8 - len(clearText) % 8)
def int32ToHexString(n):
hexChars = "0123456789abcdef"
hexString = ""
for i in range(4):
hexString += hexChars[(n >> (i * 8 + 4)) & 15]
hexString += hexChars[(n >> (i * 8)) & 15]
return hexString
def int64Xor(a, b):
sA = "{0:b}".format(a)
sB = "{0:b}".format(b)
sC = ""
sD = ""
diff = abs(len(sA) - len(sB))
for i in range(diff):
sD += "0"
if len(sA) < len(sB):
sD += sA
sA = sD
elif len(sB) < len(sA):
sD += sB
sB = sD
for i in range(len(sA)):
sC += "0" if sA[i] == sB[i] else "1"
return int(sC, 2)
def cS64(pdwData, pInHash):
MODULUS = 2147483647
CS64_a = pInHash[0] & MODULUS
CS64_b = pInHash[1] & MODULUS
CS64_c = pInHash[2] & MODULUS
CS64_d = pInHash[3] & MODULUS
CS64_e = 242854337
pos = 0
qwDatum = 0
qwMAC = 0
qwSum = 0
for i in range(len(pdwData) // 2):
qwDatum = int(pdwData[pos])
pos += 1
qwDatum *= CS64_e
qwDatum = qwDatum % MODULUS
qwMAC += qwDatum
qwMAC *= CS64_a
qwMAC += CS64_b
qwMAC = qwMAC % MODULUS
qwSum += qwMAC
qwMAC += int(pdwData[pos])
pos += 1
qwMAC *= CS64_c
qwMAC += CS64_d
qwMAC = qwMAC % MODULUS
qwSum += qwMAC
qwMAC += CS64_b
qwMAC = qwMAC % MODULUS
qwSum += CS64_d
qwSum = qwSum % MODULUS
return [qwMAC, qwSum]
cchClearText = len(clearText) // 4
pClearText = []
for i in range(cchClearText):
pClearText = pClearText[:i] + [0] + pClearText[i:]
for pos in range(4):
pClearText[i] += ord(clearText[4 * i + pos]) * (256 ** pos)
sha256Hash = [0, 0, 0, 0]
hash = hashlib.sha256((challenge + key).encode("utf-8")).hexdigest().upper()
for i in range(len(sha256Hash)):
sha256Hash[i] = 0
for pos in range(4):
dpos = 8 * i + pos * 2
sha256Hash[i] += int(hash[dpos:dpos + 2], 16) * (256 ** pos)
macHash = cS64(pClearText, sha256Hash)
macParts = [macHash[0], macHash[1], macHash[0], macHash[1]]
return "".join(map(int32ToHexString, map(int64Xor, sha256Hash, macParts))) |
def _parse_custom_mpi_options(custom_mpi_options):
# type: (str) -> Tuple[argparse.Namespace, List[str]]
"""Parse custom MPI options provided by user. Known options default value will be overridden
and unknown options would be identified separately."""
parser = argparse.ArgumentParser()
parser.add_argument('--NCCL_DEBUG', default="INFO", type=str)
return parser.parse_known_args(custom_mpi_options.split()) | Parse custom MPI options provided by user. Known options default value will be overridden
and unknown options would be identified separately. | Below is the the instruction that describes the task:
### Input:
Parse custom MPI options provided by user. Known options default value will be overridden
and unknown options would be identified separately.
### Response:
def _parse_custom_mpi_options(custom_mpi_options):
# type: (str) -> Tuple[argparse.Namespace, List[str]]
"""Parse custom MPI options provided by user. Known options default value will be overridden
and unknown options would be identified separately."""
parser = argparse.ArgumentParser()
parser.add_argument('--NCCL_DEBUG', default="INFO", type=str)
return parser.parse_known_args(custom_mpi_options.split()) |
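For illustration, a call with one recognised flag and one unrecognised flag (values are made up):
known, unknown = _parse_custom_mpi_options("--NCCL_DEBUG WARN --NCCL_SOCKET_IFNAME eth0")
print(known.NCCL_DEBUG)   # 'WARN' - the default 'INFO' is overridden
print(unknown)            # ['--NCCL_SOCKET_IFNAME', 'eth0'] - passed through untouched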
def install(name, minimum_version=None, required_version=None, scope=None,
repository=None):
'''
Install a Powershell module from powershell gallery on the system.
:param name: Name of a Powershell module
:type name: ``str``
:param minimum_version: The minimum version to install, e.g. 1.23.2
:type minimum_version: ``str``
:param required_version: Install a specific version
:type required_version: ``str``
:param scope: The scope to install the module to, e.g. CurrentUser, Computer
:type scope: ``str``
:param repository: The friendly name of a private repository, e.g. MyREpo
:type repository: ``str``
CLI Example:
.. code-block:: bash
salt 'win01' psget.install PowerPlan
'''
# Putting quotes around the parameter protects against command injection
flags = [('Name', name)]
if minimum_version is not None:
flags.append(('MinimumVersion', minimum_version))
if required_version is not None:
flags.append(('RequiredVersion', required_version))
if scope is not None:
flags.append(('Scope', scope))
if repository is not None:
flags.append(('Repository', repository))
params = ''
for flag, value in flags:
params += '-{0} {1} '.format(flag, value)
cmd = 'Install-Module {0} -Force'.format(params)
_pshell(cmd)
return name in list_modules() | Install a Powershell module from powershell gallery on the system.
:param name: Name of a Powershell module
:type name: ``str``
:param minimum_version: The minimum version to install, e.g. 1.23.2
:type minimum_version: ``str``
:param required_version: Install a specific version
:type required_version: ``str``
:param scope: The scope to install the module to, e.g. CurrentUser, Computer
:type scope: ``str``
:param repository: The friendly name of a private repository, e.g. MyREpo
:type repository: ``str``
CLI Example:
.. code-block:: bash
salt 'win01' psget.install PowerPlan | Below is the the instruction that describes the task:
### Input:
Install a Powershell module from powershell gallery on the system.
:param name: Name of a Powershell module
:type name: ``str``
:param minimum_version: The minimum version to install, e.g. 1.23.2
:type minimum_version: ``str``
:param required_version: Install a specific version
:type required_version: ``str``
:param scope: The scope to install the module to, e.g. CurrentUser, Computer
:type scope: ``str``
:param repository: The friendly name of a private repository, e.g. MyREpo
:type repository: ``str``
CLI Example:
.. code-block:: bash
salt 'win01' psget.install PowerPlan
### Response:
def install(name, minimum_version=None, required_version=None, scope=None,
repository=None):
'''
Install a Powershell module from powershell gallery on the system.
:param name: Name of a Powershell module
:type name: ``str``
:param minimum_version: The minimum version to install, e.g. 1.23.2
:type minimum_version: ``str``
:param required_version: Install a specific version
:type required_version: ``str``
:param scope: The scope to install the module to, e.g. CurrentUser, Computer
:type scope: ``str``
:param repository: The friendly name of a private repository, e.g. MyREpo
:type repository: ``str``
CLI Example:
.. code-block:: bash
salt 'win01' psget.install PowerPlan
'''
# Putting quotes around the parameter protects against command injection
flags = [('Name', name)]
if minimum_version is not None:
flags.append(('MinimumVersion', minimum_version))
if required_version is not None:
flags.append(('RequiredVersion', required_version))
if scope is not None:
flags.append(('Scope', scope))
if repository is not None:
flags.append(('Repository', repository))
params = ''
for flag, value in flags:
params += '-{0} {1} '.format(flag, value)
cmd = 'Install-Module {0} -Force'.format(params)
_pshell(cmd)
return name in list_modules() |
def commits(self, branch, since = 0, to = int(time.time()) + 86400):
"""For given branch return a list of commits.
Each commit contains basic information about itself.
:param branch: git branch
:type branch: [str]{}
:param since: minimal timestamp for commit's commit date
:type since: int
:param to: maximal timestamp for commit's commit date
:type to: int
"""
# checkout the branch
self.repo.create_head(branch, "refs/remotes/origin/%s" % branch)
since_str = datetime.datetime.fromtimestamp(since).strftime('%Y-%m-%d %H:%M:%S')
commits = {}
for commit in self.repo.iter_commits(branch, since=since_str):
# filter out all commits newer than 'to'
if commit.committed_date > to:
continue
commits[commit.hexsha] = self._commitData(commit)
return commits | For given branch return a list of commits.
Each commit contains basic information about itself.
:param branch: git branch
:type branch: [str]{}
:param since: minimal timestamp for commit's commit date
:type since: int
:param to: maximal timestamp for commit's commit date
:type to: int | Below is the the instruction that describes the task:
### Input:
For given branch return a list of commits.
Each commit contains basic information about itself.
:param branch: git branch
:type branch: [str]{}
:param since: minimal timestamp for commit's commit date
:type since: int
:param to: maximal timestamp for commit's commit date
:type to: int
### Response:
def commits(self, branch, since = 0, to = int(time.time()) + 86400):
"""For given branch return a list of commits.
Each commit contains basic information about itself.
:param branch: git branch
:type branch: [str]{}
:param since: minimal timestamp for commit's commit date
:type since: int
:param to: maximal timestamp for commit's commit date
:type to: int
"""
# checkout the branch
self.repo.create_head(branch, "refs/remotes/origin/%s" % branch)
since_str = datetime.datetime.fromtimestamp(since).strftime('%Y-%m-%d %H:%M:%S')
commits = {}
for commit in self.repo.iter_commits(branch, since=since_str):
# filter out all commits newer than 'to'
if commit.committed_date > to:
continue
commits[commit.hexsha] = self._commitData(commit)
return commits |
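The `since` filter above is handed to GitPython as a formatted local-time string; standalone:
import datetime
since = 1577836800                       # an epoch timestamp, e.g. 2020-01-01 00:00:00 UTC
since_str = datetime.datetime.fromtimestamp(since).strftime('%Y-%m-%d %H:%M:%S')
print(since_str)                         # local-time rendering of that instant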
def _split_names(namestr):
"""Split a comma-separated list of channel names.
"""
out = []
namestr = QUOTE_REGEX.sub('', namestr)
while True:
namestr = namestr.strip('\' \n')
if ',' not in namestr:
break
for nds2type in io_nds2.Nds2ChannelType.names() + ['']:
if nds2type and ',%s' % nds2type in namestr:
try:
channel, ctype, namestr = namestr.split(',', 2)
except ValueError:
channel, ctype = namestr.split(',')
namestr = ''
out.append('%s,%s' % (channel, ctype))
break
elif nds2type == '' and ',' in namestr:
channel, namestr = namestr.split(',', 1)
out.append(channel)
break
if namestr:
out.append(namestr)
return out | Split a comma-separated list of channel names. | Below is the the instruction that describes the task:
### Input:
Split a comma-separated list of channel names.
### Response:
def _split_names(namestr):
"""Split a comma-separated list of channel names.
"""
out = []
namestr = QUOTE_REGEX.sub('', namestr)
while True:
namestr = namestr.strip('\' \n')
if ',' not in namestr:
break
for nds2type in io_nds2.Nds2ChannelType.names() + ['']:
if nds2type and ',%s' % nds2type in namestr:
try:
channel, ctype, namestr = namestr.split(',', 2)
except ValueError:
channel, ctype = namestr.split(',')
namestr = ''
out.append('%s,%s' % (channel, ctype))
break
elif nds2type == '' and ',' in namestr:
channel, namestr = namestr.split(',', 1)
out.append(channel)
break
if namestr:
out.append(namestr)
return out |
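A worked example of the splitting behaviour (channel names are invented, and 'm-trend' is assumed to be one of the NDS2 type names the loop checks):
print(_split_names("X1:CHAN1,X1:CHAN2"))          # -> ['X1:CHAN1', 'X1:CHAN2']
print(_split_names("X1:CHAN1,m-trend,X1:CHAN2"))  # -> ['X1:CHAN1,m-trend', 'X1:CHAN2']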
def svm_load_model(model_file_name):
"""
svm_load_model(model_file_name) -> model
Load a LIBSVM model from model_file_name and return.
"""
model = libsvm.svm_load_model(model_file_name.encode())
if not model:
print("can't open model file %s" % model_file_name)
return None
model = toPyModel(model)
return model | svm_load_model(model_file_name) -> model
Load a LIBSVM model from model_file_name and return. | Below is the the instruction that describes the task:
### Input:
svm_load_model(model_file_name) -> model
Load a LIBSVM model from model_file_name and return.
### Response:
def svm_load_model(model_file_name):
"""
svm_load_model(model_file_name) -> model
Load a LIBSVM model from model_file_name and return.
"""
model = libsvm.svm_load_model(model_file_name.encode())
if not model:
print("can't open model file %s" % model_file_name)
return None
model = toPyModel(model)
return model |
def form_invalid(self, form, prefix=None):
""" If form invalid return error list in JSON response """
response = super(FormAjaxMixin, self).form_invalid(form)
if self.request.is_ajax():
data = {
"errors_list": self.add_prefix(form.errors, prefix),
}
return self.json_to_response(status=400, json_data=data,
json_status=AjaxResponseStatus.ERROR)
return response | If form invalid return error list in JSON response | Below is the the instruction that describes the task:
### Input:
If form invalid return error list in JSON response
### Response:
def form_invalid(self, form, prefix=None):
""" If form invalid return error list in JSON response """
response = super(FormAjaxMixin, self).form_invalid(form)
if self.request.is_ajax():
data = {
"errors_list": self.add_prefix(form.errors, prefix),
}
return self.json_to_response(status=400, json_data=data,
json_status=AjaxResponseStatus.ERROR)
return response |
def convert_leakyrelu(node, **kwargs):
"""Map MXNet's LeakyReLU operator attributes to onnx's Elu/LeakyRelu/PRelu operators
based on the input node's attributes and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
act_type = attrs.get("act_type", "leaky")
alpha = float(attrs.get("slope", 0.25))
act_name = {"elu": "Elu", "leaky": "LeakyRelu", "prelu": "PRelu",
"selu": "Selu"}
if act_type == "prelu" or act_type == "selu":
node = onnx.helper.make_node(
act_name[act_type],
inputs=input_nodes,
outputs=[name],
name=name)
else:
node = onnx.helper.make_node(
act_name[act_type],
inputs=input_nodes,
outputs=[name],
name=name,
alpha=alpha)
return [node] | Map MXNet's LeakyReLU operator attributes to onnx's Elu/LeakyRelu/PRelu operators
based on the input node's attributes and return the created node. | Below is the the instruction that describes the task:
### Input:
Map MXNet's LeakyReLU operator attributes to onnx's Elu/LeakyRelu/PRelu operators
based on the input node's attributes and return the created node.
### Response:
def convert_leakyrelu(node, **kwargs):
"""Map MXNet's LeakyReLU operator attributes to onnx's Elu/LeakyRelu/PRelu operators
based on the input node's attributes and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
act_type = attrs.get("act_type", "leaky")
alpha = float(attrs.get("slope", 0.25))
act_name = {"elu": "Elu", "leaky": "LeakyRelu", "prelu": "PRelu",
"selu": "Selu"}
if act_type == "prelu" or act_type == "selu":
node = onnx.helper.make_node(
act_name[act_type],
inputs=input_nodes,
outputs=[name],
name=name)
else:
node = onnx.helper.make_node(
act_name[act_type],
inputs=input_nodes,
outputs=[name],
name=name,
alpha=alpha)
return [node] |
def get_accounts(self, owner_id=None, member_id=None, properties=None):
"""GetAccounts.
Get a list of accounts for a specific owner or a specific member.
:param str owner_id: ID for the owner of the accounts.
:param str member_id: ID for a member of the accounts.
:param str properties:
:rtype: [Account]
"""
query_parameters = {}
if owner_id is not None:
query_parameters['ownerId'] = self._serialize.query('owner_id', owner_id, 'str')
if member_id is not None:
query_parameters['memberId'] = self._serialize.query('member_id', member_id, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='229a6a53-b428-4ffb-a835-e8f36b5b4b1e',
version='5.0',
query_parameters=query_parameters)
return self._deserialize('[Account]', self._unwrap_collection(response)) | GetAccounts.
Get a list of accounts for a specific owner or a specific member.
:param str owner_id: ID for the owner of the accounts.
:param str member_id: ID for a member of the accounts.
:param str properties:
:rtype: [Account] | Below is the the instruction that describes the task:
### Input:
GetAccounts.
Get a list of accounts for a specific owner or a specific member.
:param str owner_id: ID for the owner of the accounts.
:param str member_id: ID for a member of the accounts.
:param str properties:
:rtype: [Account]
### Response:
def get_accounts(self, owner_id=None, member_id=None, properties=None):
"""GetAccounts.
Get a list of accounts for a specific owner or a specific member.
:param str owner_id: ID for the owner of the accounts.
:param str member_id: ID for a member of the accounts.
:param str properties:
:rtype: [Account]
"""
query_parameters = {}
if owner_id is not None:
query_parameters['ownerId'] = self._serialize.query('owner_id', owner_id, 'str')
if member_id is not None:
query_parameters['memberId'] = self._serialize.query('member_id', member_id, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='229a6a53-b428-4ffb-a835-e8f36b5b4b1e',
version='5.0',
query_parameters=query_parameters)
return self._deserialize('[Account]', self._unwrap_collection(response)) |
def run(path, code=None, params=None, **meta):
"""Check code with pycodestyle.
:return list: List of errors.
"""
parser = get_parser()
for option in parser.option_list:
if option.dest and option.dest in params:
value = params[option.dest]
if isinstance(value, str):
params[option.dest] = option.convert_value(option, value)
for key in ["filename", "exclude", "select", "ignore"]:
if key in params and isinstance(params[key], str):
params[key] = _parse_multi_options(params[key])
P8Style = StyleGuide(reporter=_PycodestyleReport, **params)
buf = StringIO(code)
return P8Style.input_file(path, lines=buf.readlines()) | Check code with pycodestyle.
:return list: List of errors. | Below is the the instruction that describes the task:
### Input:
Check code with pycodestyle.
:return list: List of errors.
### Response:
def run(path, code=None, params=None, **meta):
"""Check code with pycodestyle.
:return list: List of errors.
"""
parser = get_parser()
for option in parser.option_list:
if option.dest and option.dest in params:
value = params[option.dest]
if isinstance(value, str):
params[option.dest] = option.convert_value(option, value)
for key in ["filename", "exclude", "select", "ignore"]:
if key in params and isinstance(params[key], str):
params[key] = _parse_multi_options(params[key])
P8Style = StyleGuide(reporter=_PycodestyleReport, **params)
buf = StringIO(code)
return P8Style.input_file(path, lines=buf.readlines()) |
def get_departures(self, station):
"""
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
"""
url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
raw_departures = self._request('GET', url)
return self.parse_departures(raw_departures) | Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup | Below is the the instruction that describes the task:
### Input:
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
### Response:
def get_departures(self, station):
"""
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
"""
url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
raw_departures = self._request('GET', url)
return self.parse_departures(raw_departures) |
def all(self, endpoint, *args, **kwargs):
"""Retrieve all the data of a paginated endpoint, using GET.
:returns: The endpoint unpaginated data
:rtype: dict
"""
# 1. Initialize the pagination parameters.
kwargs.setdefault('params', {})['offset'] = 0
kwargs.setdefault('params', {})['limit'] = self.limit
kwargs['__method__'] = 'get'
# 2. Create an initial paginated request.
payload = self.request(endpoint, *args, **kwargs)
has_next = payload.get('result', {}).setdefault(
'meta', {'next': None}
)['next']
# 3. Loop until the end
while has_next:
# 4. Increment the offset
kwargs['params']['offset'] += self.limit
# 5. Query again
_payload = self.request(endpoint, *args, **kwargs)
# 6. Add the paginated data to the global one
payload['result']['data'].extend(_payload['result']['data'])
# 7. Compute has_next
has_next = _payload['result']['meta']['next']
del payload['result']['meta']
return payload | Retrieve all the data of a paginated endpoint, using GET.
:returns: The endpoint unpaginated data
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Retrieve all the data of a paginated endpoint, using GET.
:returns: The endpoint unpaginated data
:rtype: dict
### Response:
def all(self, endpoint, *args, **kwargs):
"""Retrieve all the data of a paginated endpoint, using GET.
:returns: The endpoint unpaginated data
:rtype: dict
"""
# 1. Initialize the pagination parameters.
kwargs.setdefault('params', {})['offset'] = 0
kwargs.setdefault('params', {})['limit'] = self.limit
kwargs['__method__'] = 'get'
# 2. Create an initial paginated request.
payload = self.request(endpoint, *args, **kwargs)
has_next = payload.get('result', {}).setdefault(
'meta', {'next': None}
)['next']
# 3. Loop until the end
while has_next:
# 4. Increment the offset
kwargs['params']['offset'] += self.limit
# 5. Query again
_payload = self.request(endpoint, *args, **kwargs)
# 6. Add the paginated data to the global one
payload['result']['data'].extend(_payload['result']['data'])
# 7. Compute has_next
has_next = _payload['result']['meta']['next']
del payload['result']['meta']
return payload |
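The offset bookkeeping above walks pages of `limit` items until the API reports no next page; the sequence of offsets it requests looks like this (standalone illustration with made-up sizes):
limit, total = 50, 120          # hypothetical page size and collection size
offsets, offset, has_next = [], 0, True
while has_next:
    offsets.append(offset)      # one offset per request; the first request happens before the loop above
    offset += limit
    has_next = offset < total   # stand-in for the API's meta['next'] field
print(offsets)                  # -> [0, 50, 100]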
def __get_boxes(self):
"""
Get all the word boxes of this page.
"""
boxfile = self.__box_path
try:
box_builder = pyocr.builders.LineBoxBuilder()
with self.fs.open(boxfile, 'r') as file_desc:
boxes = box_builder.read_file(file_desc)
if boxes != []:
return boxes
# fallback: old format: word boxes
# shouldn't be used anymore ...
box_builder = pyocr.builders.WordBoxBuilder()
with self.fs.open(boxfile, 'r') as file_desc:
boxes = box_builder.read_file(file_desc)
if len(boxes) <= 0:
return []
logger.warning("WARNING: Doc %s uses old box format" %
(str(self.doc)))
return [pyocr.builders.LineBox(boxes, boxes[0].position)]
except IOError as exc:
logger.error("Unable to get boxes for '%s': %s"
% (self.doc.docid, exc))
return [] | Get all the word boxes of this page. | Below is the the instruction that describes the task:
### Input:
Get all the word boxes of this page.
### Response:
def __get_boxes(self):
"""
Get all the word boxes of this page.
"""
boxfile = self.__box_path
try:
box_builder = pyocr.builders.LineBoxBuilder()
with self.fs.open(boxfile, 'r') as file_desc:
boxes = box_builder.read_file(file_desc)
if boxes != []:
return boxes
# fallback: old format: word boxes
# shouldn't be used anymore ...
box_builder = pyocr.builders.WordBoxBuilder()
with self.fs.open(boxfile, 'r') as file_desc:
boxes = box_builder.read_file(file_desc)
if len(boxes) <= 0:
return []
logger.warning("WARNING: Doc %s uses old box format" %
(str(self.doc)))
return [pyocr.builders.LineBox(boxes, boxes[0].position)]
except IOError as exc:
logger.error("Unable to get boxes for '%s': %s"
% (self.doc.docid, exc))
return [] |
def generate_jobs(args, job_list, argument_string):
"""Generate actual scripts to be submitted to the cluster
:param args: argparse argument collection
:param job_list: dictionary containing each job to be submitted
:param argument_string: string containing general arguments to be used by mvtest.py during execution
:return: None
"""
mvtest_path = args.mvpath
template = "".join(args.template.readlines())
logpath = os.path.abspath(args.logpath)
respath = os.path.abspath(args.res_path)
scriptpath = os.path.abspath(args.script_path)
pwd = os.path.abspath(os.getcwd())
for jobname in job_list.keys():
filename = "%s/%s.sh" % (scriptpath, jobname)
job_body = mvtest_path + " " + argument_string + " " + job_list[jobname]
contents = Template(template).safe_substitute(
logpath=logpath,
respath=respath,
body=job_body,
jobname=jobname,
memory=args.mem,
walltime=args.walltime,
pwd=pwd)
file = open(filename, "w")
print >> file,contents | Generate actual scripts to be submitted to the cluster
:param args: argparse argument collection
:param job_list: dictionary containing each job to be submitted
:param argument_string: string containing general arguments to be used by mvtest.py during execution
:return: None | Below is the the instruction that describes the task:
### Input:
Generate actual scripts to be submitted to the cluster
:param args: argparse argument collection
:param job_list: dictionary containing each job to be submitted
:param argument_string: string containing general arguments to be used by mvtest.py during execution
:return: None
### Response:
def generate_jobs(args, job_list, argument_string):
"""Generate actual scripts to be submitted to the cluster
:param args: argparse argument collection
:param job_list: dictionary containing each job to be submitted
:param argument_string: string containing general arguments to be used by mvtest.py during execution
:return: None
"""
mvtest_path = args.mvpath
template = "".join(args.template.readlines())
logpath = os.path.abspath(args.logpath)
respath = os.path.abspath(args.res_path)
scriptpath = os.path.abspath(args.script_path)
pwd = os.path.abspath(os.getcwd())
for jobname in job_list.keys():
filename = "%s/%s.sh" % (scriptpath, jobname)
job_body = mvtest_path + " " + argument_string + " " + job_list[jobname]
contents = Template(template).safe_substitute(
logpath=logpath,
respath=respath,
body=job_body,
jobname=jobname,
memory=args.mem,
walltime=args.walltime,
pwd=pwd)
file = open(filename, "w")
print >> file,contents |
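The script bodies rely on `string.Template` placeholders being present in the user-supplied template file; a minimal standalone sketch with an invented template:
from string import Template
template = "#PBS -N ${jobname}\n#PBS -l walltime=${walltime}\ncd ${pwd}\n${body}\n"
print(Template(template).safe_substitute(
    jobname="chr1-job", walltime="24:00:00", pwd="/tmp", body="mvtest.py --chr 1"))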
def main_dev(**kwargs):
"""Main entry point.
you-get-dev
"""
# Get (branch, commit) if running from a git repo.
head = git.get_head(kwargs['repo_path'])
# Get options and arguments.
try:
opts, args = getopt.getopt(sys.argv[1:], _short_options, _options)
except getopt.GetoptError as e:
log.wtf("""
[Fatal] {}.
Try '{} --help' for more options.""".format(e, script_name))
if not opts and not args:
# Display help.
print(_help)
# Enter GUI mode.
#from .gui import gui_main
#gui_main()
else:
conf = {}
for opt, arg in opts:
if opt in ('-h', '--help'):
# Display help.
print(_help)
elif opt in ('-V', '--version'):
# Display version.
log.println("you-get:", log.BOLD)
log.println(" version: {}".format(__version__))
if head is not None:
log.println(" branch: {}\n commit: {}".format(*head))
else:
log.println(" branch: {}\n commit: {}".format("(stable)", "(tag v{})".format(__version__)))
log.println(" platform: {}".format(platform.platform()))
log.println(" python: {}".format(sys.version.split('\n')[0]))
elif opt in ('-g', '--gui'):
# Run using GUI.
conf['gui'] = True
elif opt in ('-f', '--force'):
# Force download.
conf['force'] = True
elif opt in ('-l', '--playlist', '--playlists'):
# Download playlist whenever possible.
conf['playlist'] = True
if args:
if 'gui' in conf and conf['gui']:
# Enter GUI mode.
from .gui import gui_main
gui_main(*args, **conf)
else:
# Enter console mode.
from .console import console_main
console_main(*args, **conf) | Main entry point.
you-get-dev | Below is the the instruction that describes the task:
### Input:
Main entry point.
you-get-dev
### Response:
def main_dev(**kwargs):
"""Main entry point.
you-get-dev
"""
# Get (branch, commit) if running from a git repo.
head = git.get_head(kwargs['repo_path'])
# Get options and arguments.
try:
opts, args = getopt.getopt(sys.argv[1:], _short_options, _options)
except getopt.GetoptError as e:
log.wtf("""
[Fatal] {}.
Try '{} --help' for more options.""".format(e, script_name))
if not opts and not args:
# Display help.
print(_help)
# Enter GUI mode.
#from .gui import gui_main
#gui_main()
else:
conf = {}
for opt, arg in opts:
if opt in ('-h', '--help'):
# Display help.
print(_help)
elif opt in ('-V', '--version'):
# Display version.
log.println("you-get:", log.BOLD)
log.println(" version: {}".format(__version__))
if head is not None:
log.println(" branch: {}\n commit: {}".format(*head))
else:
log.println(" branch: {}\n commit: {}".format("(stable)", "(tag v{})".format(__version__)))
log.println(" platform: {}".format(platform.platform()))
log.println(" python: {}".format(sys.version.split('\n')[0]))
elif opt in ('-g', '--gui'):
# Run using GUI.
conf['gui'] = True
elif opt in ('-f', '--force'):
# Force download.
conf['force'] = True
elif opt in ('-l', '--playlist', '--playlists'):
# Download playlist whenever possible.
conf['playlist'] = True
if args:
if 'gui' in conf and conf['gui']:
# Enter GUI mode.
from .gui import gui_main
gui_main(*args, **conf)
else:
# Enter console mode.
from .console import console_main
console_main(*args, **conf) |
def get_conda_root():
"""Get the PREFIX of the conda installation.
Returns:
str: the ROOT_PREFIX of the conda installation
"""
try:
# Fast-path
# We're in the root environment
conda_root = _import_conda_root()
except ImportError:
# We're not in the root environment.
envs_dir = dirname(CONDA_PREFIX)
if basename(envs_dir) == 'envs':
# We're in a named environment: `conda create -n <name>`
conda_root = dirname(envs_dir)
else:
# We're in an isolated environment: `conda create -p <path>`
# The only way we can find out is by calling conda.
conda_root = _conda_root_from_conda_info()
return conda_root | Get the PREFIX of the conda installation.
Returns:
str: the ROOT_PREFIX of the conda installation | Below is the the instruction that describes the task:
### Input:
Get the PREFIX of the conda installation.
Returns:
str: the ROOT_PREFIX of the conda installation
### Response:
def get_conda_root():
"""Get the PREFIX of the conda installation.
Returns:
str: the ROOT_PREFIX of the conda installation
"""
try:
# Fast-path
# We're in the root environment
conda_root = _import_conda_root()
except ImportError:
# We're not in the root environment.
envs_dir = dirname(CONDA_PREFIX)
if basename(envs_dir) == 'envs':
# We're in a named environment: `conda create -n <name>`
conda_root = dirname(envs_dir)
else:
# We're in an isolated environment: `conda create -p <path>`
# The only way we can find out is by calling conda.
conda_root = _conda_root_from_conda_info()
return conda_root |
def get_metadata(self, filename):
'''Fetch all available metadata'''
dest = self.path(filename)
with open(dest, 'rb', buffering=0) as f:
checksum = 'sha1:{0}'.format(sha1(f))
return {
'checksum': checksum,
'size': os.path.getsize(dest),
'mime': files.mime(filename),
'modified': datetime.fromtimestamp(os.path.getmtime(dest)),
} | Fetch all available metadata | Below is the the instruction that describes the task:
### Input:
Fetch all available metadata
### Response:
def get_metadata(self, filename):
'''Fetch all available metadata'''
dest = self.path(filename)
with open(dest, 'rb', buffering=0) as f:
checksum = 'sha1:{0}'.format(sha1(f))
return {
'checksum': checksum,
'size': os.path.getsize(dest),
'mime': files.mime(filename),
'modified': datetime.fromtimestamp(os.path.getmtime(dest)),
} |
def register(self, target):
"""Registers url_rules on the blueprint
"""
for rule, options in self.url_rules:
target.add_url_rule(rule, self.name, self.dispatch_request, **options) | Registers url_rules on the blueprint | Below is the the instruction that describes the task:
### Input:
Registers url_rules on the blueprint
### Response:
def register(self, target):
"""Registers url_rules on the blueprint
"""
for rule, options in self.url_rules:
target.add_url_rule(rule, self.name, self.dispatch_request, **options) |
def _estimate_param_scan_worker(estimator, params, X, evaluate, evaluate_args,
failfast, return_exceptions):
""" Method that runs estimation for several parameter settings.
Defined as a worker for parallelization
"""
# run estimation
model = None
try: # catch any exception
estimator.estimate(X, **params)
model = estimator.model
except KeyboardInterrupt:
# we want to be able to interactively interrupt the worker, regardless of failfast=False.
raise
except:
e = sys.exc_info()[1]
if isinstance(estimator, Loggable):
estimator.logger.warning("Ignored error during estimation: %s" % e)
if failfast:
raise # re-raise
elif return_exceptions:
model = e
else:
pass # just return model=None
# deal with results
res = []
# deal with result
if evaluate is None: # we want full models
res.append(model)
# we want to evaluate function(s) of the model
elif _types.is_iterable(evaluate):
values = [] # the function values of the model
for ieval, name in enumerate(evaluate):
# get method/attribute name and arguments to be evaluated
#name = evaluate[ieval]
args = ()
if evaluate_args is not None:
args = evaluate_args[ieval]
# wrap single arguments in an iterable again to pass them.
if _types.is_string(args):
args = (args, )
# evaluate
try:
# try calling method/property/attribute
value = _call_member(estimator.model, name, failfast, *args)
# couldn't find method/property/attribute
except AttributeError as e:
if failfast:
raise e # raise an AttributeError
else:
value = None # we just ignore it and return None
values.append(value)
# if we only have one value, unpack it
if len(values) == 1:
values = values[0]
res.append(values)
else:
raise ValueError('Invalid setting for evaluate: ' + str(evaluate))
if len(res) == 1:
res = res[0]
return res | Method that runs estimation for several parameter settings.
Defined as a worker for parallelization | Below is the the instruction that describes the task:
### Input:
Method that runs estimation for several parameter settings.
Defined as a worker for parallelization
### Response:
def _estimate_param_scan_worker(estimator, params, X, evaluate, evaluate_args,
failfast, return_exceptions):
""" Method that runs estimation for several parameter settings.
Defined as a worker for parallelization
"""
# run estimation
model = None
try: # catch any exception
estimator.estimate(X, **params)
model = estimator.model
except KeyboardInterrupt:
# we want to be able to interactively interrupt the worker, regardless of failfast=False.
raise
except:
e = sys.exc_info()[1]
if isinstance(estimator, Loggable):
estimator.logger.warning("Ignored error during estimation: %s" % e)
if failfast:
raise # re-raise
elif return_exceptions:
model = e
else:
pass # just return model=None
# deal with results
res = []
# deal with result
if evaluate is None: # we want full models
res.append(model)
# we want to evaluate function(s) of the model
elif _types.is_iterable(evaluate):
values = [] # the function values of the model
for ieval, name in enumerate(evaluate):
# get method/attribute name and arguments to be evaluated
#name = evaluate[ieval]
args = ()
if evaluate_args is not None:
args = evaluate_args[ieval]
# wrap single arguments in an iterable again to pass them.
if _types.is_string(args):
args = (args, )
# evaluate
try:
# try calling method/property/attribute
value = _call_member(estimator.model, name, failfast, *args)
# couldn't find method/property/attribute
except AttributeError as e:
if failfast:
raise e # raise an AttributeError
else:
value = None # we just ignore it and return None
values.append(value)
# if we only have one value, unpack it
if len(values) == 1:
values = values[0]
res.append(values)
else:
raise ValueError('Invalid setting for evaluate: ' + str(evaluate))
if len(res) == 1:
res = res[0]
return res |
def _user_thread_main(self, target):
"""Main entry point for the thread that will run user's code."""
try:
# Run user's code.
return_code = target()
# Assume good result (0 return code) if none is returned.
if return_code is None:
return_code = 0
# Call exit on the main thread when user code has finished.
AppHelper.callAfter(lambda: sys.exit(return_code))
except Exception as ex:
# Something went wrong. Raise the exception on the main thread to exit.
AppHelper.callAfter(self._raise_error, sys.exc_info()) | Main entry point for the thread that will run user's code. | Below is the the instruction that describes the task:
### Input:
Main entry point for the thread that will run user's code.
### Response:
def _user_thread_main(self, target):
"""Main entry point for the thread that will run user's code."""
try:
# Run user's code.
return_code = target()
# Assume good result (0 return code) if none is returned.
if return_code is None:
return_code = 0
# Call exit on the main thread when user code has finished.
AppHelper.callAfter(lambda: sys.exit(return_code))
except Exception as ex:
# Something went wrong. Raise the exception on the main thread to exit.
AppHelper.callAfter(self._raise_error, sys.exc_info()) |
def get_homes(self, query=None, gps_lat=None, gps_lng=None, offset=0, items_per_grid=8):
"""
Search listings with
* Query (e.g. query="Lisbon, Portugal") or
* Location (e.g. gps_lat=55.6123352&gps_lng=37.7117917)
"""
params = {
'is_guided_search': 'true',
'version': '1.3.9',
'section_offset': '0',
'items_offset': str(offset),
'adults': '0',
'screen_size': 'small',
'source': 'explore_tabs',
'items_per_grid': str(items_per_grid),
'_format': 'for_explore_search_native',
'metadata_only': 'false',
'refinement_paths[]': '/homes',
'timezone': 'Europe/Lisbon',
'satori_version': '1.0.7'
}
if not query and not (gps_lat and gps_lng):
raise MissingParameterError("Missing query or gps coordinates")
if query:
params['query'] = query
if gps_lat and gps_lng:
params['gps_lat'] = gps_lat
params['gps_lng'] = gps_lng
r = self._session.get(API_URL + '/explore_tabs', params=params)
r.raise_for_status()
return r.json() | Search listings with
* Query (e.g. query="Lisbon, Portugal") or
* Location (e.g. gps_lat=55.6123352&gps_lng=37.7117917) | Below is the the instruction that describes the task:
### Input:
Search listings with
* Query (e.g. query="Lisbon, Portugal") or
* Location (e.g. gps_lat=55.6123352&gps_lng=37.7117917)
### Response:
def get_homes(self, query=None, gps_lat=None, gps_lng=None, offset=0, items_per_grid=8):
"""
Search listings with
* Query (e.g. query="Lisbon, Portugal") or
* Location (e.g. gps_lat=55.6123352&gps_lng=37.7117917)
"""
params = {
'is_guided_search': 'true',
'version': '1.3.9',
'section_offset': '0',
'items_offset': str(offset),
'adults': '0',
'screen_size': 'small',
'source': 'explore_tabs',
'items_per_grid': str(items_per_grid),
'_format': 'for_explore_search_native',
'metadata_only': 'false',
'refinement_paths[]': '/homes',
'timezone': 'Europe/Lisbon',
'satori_version': '1.0.7'
}
if not query and not (gps_lat and gps_lng):
raise MissingParameterError("Missing query or gps coordinates")
if query:
params['query'] = query
if gps_lat and gps_lng:
params['gps_lat'] = gps_lat
params['gps_lng'] = gps_lng
r = self._session.get(API_URL + '/explore_tabs', params=params)
r.raise_for_status()
return r.json() |
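A brief usage sketch for the search helper above. The `client` object is a stand-in for an instance of the class that defines get_homes; its construction and authentication are not shown in this snippet, so treat everything except the get_homes signature as an assumption.
# Hypothetical usage; `client` must be an instance of the class defining get_homes.
results = client.get_homes(query="Lisbon, Portugal", items_per_grid=16)
# Alternatively, search around a coordinate instead of a free-text query:
nearby = client.get_homes(gps_lat=55.6123352, gps_lng=37.7117917, offset=8)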
def connect(
project_id: Optional[str] = None,
dataset_id: Optional[str] = None,
credentials: Optional[google.auth.credentials.Credentials] = None,
) -> BigQueryClient:
"""Create a BigQueryClient for use with Ibis.
Parameters
----------
project_id : str
A BigQuery project id.
dataset_id : str
A dataset id that lives inside of the project indicated by
`project_id`.
credentials : google.auth.credentials.Credentials
Returns
-------
BigQueryClient
"""
if credentials is None:
credentials_cache = pydata_google_auth.cache.ReadWriteCredentialsCache(
filename="ibis.json"
)
credentials, project_id = pydata_google_auth.default(
SCOPES,
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
credentials_cache=credentials_cache,
)
return BigQueryClient(
project_id, dataset_id=dataset_id, credentials=credentials
) | Create a BigQueryClient for use with Ibis.
Parameters
----------
project_id : str
A BigQuery project id.
dataset_id : str
A dataset id that lives inside of the project indicated by
`project_id`.
credentials : google.auth.credentials.Credentials
Returns
-------
BigQueryClient | Below is the the instruction that describes the task:
### Input:
Create a BigQueryClient for use with Ibis.
Parameters
----------
project_id : str
A BigQuery project id.
dataset_id : str
A dataset id that lives inside of the project indicated by
`project_id`.
credentials : google.auth.credentials.Credentials
Returns
-------
BigQueryClient
### Response:
def connect(
project_id: Optional[str] = None,
dataset_id: Optional[str] = None,
credentials: Optional[google.auth.credentials.Credentials] = None,
) -> BigQueryClient:
"""Create a BigQueryClient for use with Ibis.
Parameters
----------
project_id : str
A BigQuery project id.
dataset_id : str
A dataset id that lives inside of the project indicated by
`project_id`.
credentials : google.auth.credentials.Credentials
Returns
-------
BigQueryClient
"""
if credentials is None:
credentials_cache = pydata_google_auth.cache.ReadWriteCredentialsCache(
filename="ibis.json"
)
credentials, project_id = pydata_google_auth.default(
SCOPES,
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
credentials_cache=credentials_cache,
)
return BigQueryClient(
project_id, dataset_id=dataset_id, credentials=credentials
) |
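A minimal call sketch for connect() above; the project and dataset names are placeholders, and the interactive pydata_google_auth flow is assumed to be available when no credentials are passed.
# Placeholder project/dataset ids; omitting credentials triggers the pydata_google_auth flow.
client = connect(project_id="my-project", dataset_id="my_dataset")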
def rename_tabs_after_change(self, given_name):
"""Rename tabs after a change in name."""
client = self.get_current_client()
# Prevent renames that want to assign the same name as
# a previous tab
repeated = False
for cl in self.get_clients():
if id(client) != id(cl) and given_name == cl.given_name:
repeated = True
break
# Rename current client tab to add str_id
if client.allow_rename and not u'/' in given_name and not repeated:
self.rename_client_tab(client, given_name)
else:
self.rename_client_tab(client, None)
# Rename related clients
if client.allow_rename and not u'/' in given_name and not repeated:
for cl in self.get_related_clients(client):
self.rename_client_tab(cl, given_name) | Rename tabs after a change in name. | Below is the the instruction that describes the task:
### Input:
Rename tabs after a change in name.
### Response:
def rename_tabs_after_change(self, given_name):
"""Rename tabs after a change in name."""
client = self.get_current_client()
# Prevent renames that want to assign the same name as
# a previous tab
repeated = False
for cl in self.get_clients():
if id(client) != id(cl) and given_name == cl.given_name:
repeated = True
break
# Rename current client tab to add str_id
if client.allow_rename and not u'/' in given_name and not repeated:
self.rename_client_tab(client, given_name)
else:
self.rename_client_tab(client, None)
# Rename related clients
if client.allow_rename and not u'/' in given_name and not repeated:
for cl in self.get_related_clients(client):
self.rename_client_tab(cl, given_name) |
def build_send(self, entity: BaseEntity, from_user: UserType, to_user_key: RsaKey = None) -> Union[str, Dict]:
"""
Build POST data for sending out to remotes.
:param entity: The outbound ready entity for this protocol.
:param from_user: The user sending this payload. Must have ``private_key`` and ``id`` properties.
:param to_user_key: (Optional) Public key of user we're sending a private payload to.
:returns: dict or string depending on if private or public payload.
"""
if hasattr(entity, "outbound_doc") and entity.outbound_doc is not None:
# Use pregenerated outbound document
rendered = entity.outbound_doc
else:
rendered = entity.to_as2()
return rendered | Build POST data for sending out to remotes.
:param entity: The outbound ready entity for this protocol.
:param from_user: The user sending this payload. Must have ``private_key`` and ``id`` properties.
:param to_user_key: (Optional) Public key of user we're sending a private payload to.
:returns: dict or string depending on if private or public payload. | Below is the the instruction that describes the task:
### Input:
Build POST data for sending out to remotes.
:param entity: The outbound ready entity for this protocol.
:param from_user: The user sending this payload. Must have ``private_key`` and ``id`` properties.
:param to_user_key: (Optional) Public key of user we're sending a private payload to.
:returns: dict or string depending on if private or public payload.
### Response:
def build_send(self, entity: BaseEntity, from_user: UserType, to_user_key: RsaKey = None) -> Union[str, Dict]:
"""
Build POST data for sending out to remotes.
:param entity: The outbound ready entity for this protocol.
:param from_user: The user sending this payload. Must have ``private_key`` and ``id`` properties.
:param to_user_key: (Optional) Public key of user we're sending a private payload to.
:returns: dict or string depending on if private or public payload.
"""
if hasattr(entity, "outbound_doc") and entity.outbound_doc is not None:
# Use pregenerated outbound document
rendered = entity.outbound_doc
else:
rendered = entity.to_as2()
return rendered |
def update_token(self):
"""Request a new token and store it for future use"""
logger.info('updating token')
if None in self.credentials.values():
raise RuntimeError("You must provide a username and a password")
credentials = dict(auth=self.credentials)
url = self.test_url if self.test else self.url
response = requests.post(url + "auth",
json=credentials)
data = response.json()["response"]
if "error_id" in data and data["error_id"] == "NOAUTH":
raise BadCredentials()
if "error_code" in data and data["error_code"] == "RATE_EXCEEDED":
time.sleep(150)
return
if "error_code" in data or "error_id" in data:
raise AppNexusException(response)
self.token = data["token"]
self.save_token()
return self.token | Request a new token and store it for future use | Below is the the instruction that describes the task:
### Input:
Request a new token and store it for future use
### Response:
def update_token(self):
"""Request a new token and store it for future use"""
logger.info('updating token')
if None in self.credentials.values():
raise RuntimeError("You must provide a username and a password")
credentials = dict(auth=self.credentials)
url = self.test_url if self.test else self.url
response = requests.post(url + "auth",
json=credentials)
data = response.json()["response"]
if "error_id" in data and data["error_id"] == "NOAUTH":
raise BadCredentials()
if "error_code" in data and data["error_code"] == "RATE_EXCEEDED":
time.sleep(150)
return
if "error_code" in data or "error_id" in data:
raise AppNexusException(response)
self.token = data["token"]
self.save_token()
return self.token |
def mesh(self,xyzs):
"""
Evaluate basis function on a mesh of points *xyz*.
"""
I,J,K = self.powers
d = np.asarray(xyzs,'d')-self.origin
# Got help from stackoverflow user @unutbu with this.
# See: http://stackoverflow.com/questions/17391052/compute-square-distances-from-numpy-array
d2 = np.einsum('ij,ij -> i',d,d)
return self.norm*d[:,0]**I*d[:,1]**J*d[:,2]**K*np.exp(-self.exponent*d2) | Evaluate basis function on a mesh of points *xyz*. | Below is the the instruction that describes the task:
### Input:
Evaluate basis function on a mesh of points *xyz*.
### Response:
def mesh(self,xyzs):
"""
Evaluate basis function on a mesh of points *xyz*.
"""
I,J,K = self.powers
d = np.asarray(xyzs,'d')-self.origin
# Got help from stackoverflow user @unutbu with this.
# See: http://stackoverflow.com/questions/17391052/compute-square-distances-from-numpy-array
d2 = np.einsum('ij,ij -> i',d,d)
return self.norm*d[:,0]**I*d[:,1]**J*d[:,2]**K*np.exp(-self.exponent*d2) |
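A usage sketch for mesh() above, assuming `bf` is an already-constructed basis-function object carrying the attributes the method reads (origin, powers, norm, exponent); only the call itself comes from the code shown.
import numpy as np
# 100 random evaluation points; `bf` is assumed to exist (its constructor is not shown here).
xyzs = np.random.default_rng(0).normal(size=(100, 3))
values = bf.mesh(xyzs)  # one basis-function value per point, shape (100,)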
def compute_stable_poses(self,
center_mass=None,
sigma=0.0,
n_samples=1,
threshold=0.0):
"""
Computes stable orientations of a mesh and their quasi-static probabilities.
This method samples the location of the center of mass from a multivariate
gaussian (mean at com, cov equal to identity times sigma) over n_samples.
For each sample, it computes the stable resting poses of the mesh on
a planar workspace and evaluates the probabilities of landing in
each pose if the object is dropped onto the table randomly.
This method returns the 4x4 homogenous transform matrices that place
the shape against the planar surface with the z-axis pointing upwards
and a list of the probabilities for each pose.
The transforms and probabilities that are returned are sorted, with the
most probable pose first.
Parameters
----------
center_mass : (3,) float
The object center of mass (if None, this method
assumes uniform density and watertightness and
computes a center of mass explicitly)
sigma : float
The covariance for the multivariate gaussian used
to sample center of mass locations
n_samples : int
The number of samples of the center of mass location
threshold : float
The probability value at which to threshold
returned stable poses
Returns
-------
transforms : (n, 4, 4) float
The homogenous matrices that transform the
object to rest in a stable pose, with the
new z-axis pointing upwards from the table
and the object just touching the table.
probs : (n,) float
A probability ranging from 0.0 to 1.0 for each pose
"""
return poses.compute_stable_poses(mesh=self,
center_mass=center_mass,
sigma=sigma,
n_samples=n_samples,
threshold=threshold) | Computes stable orientations of a mesh and their quasi-static probabilities.
This method samples the location of the center of mass from a multivariate
gaussian (mean at com, cov equal to identity times sigma) over n_samples.
For each sample, it computes the stable resting poses of the mesh on
a planar workspace and evaluates the probabilities of landing in
each pose if the object is dropped onto the table randomly.
This method returns the 4x4 homogenous transform matrices that place
the shape against the planar surface with the z-axis pointing upwards
and a list of the probabilities for each pose.
The transforms and probabilities that are returned are sorted, with the
most probable pose first.
Parameters
----------
center_mass : (3,) float
The object center of mass (if None, this method
assumes uniform density and watertightness and
computes a center of mass explicitly)
sigma : float
The covariance for the multivariate gaussian used
to sample center of mass locations
n_samples : int
The number of samples of the center of mass location
threshold : float
The probability value at which to threshold
returned stable poses
Returns
-------
transforms : (n, 4, 4) float
The homogenous matrices that transform the
object to rest in a stable pose, with the
new z-axis pointing upwards from the table
and the object just touching the table.
probs : (n,) float
A probability ranging from 0.0 to 1.0 for each pose | Below is the the instruction that describes the task:
### Input:
Computes stable orientations of a mesh and their quasi-static probabilities.
This method samples the location of the center of mass from a multivariate
gaussian (mean at com, cov equal to identity times sigma) over n_samples.
For each sample, it computes the stable resting poses of the mesh on
a planar workspace and evaluates the probabilities of landing in
each pose if the object is dropped onto the table randomly.
This method returns the 4x4 homogenous transform matrices that place
the shape against the planar surface with the z-axis pointing upwards
and a list of the probabilities for each pose.
The transforms and probabilities that are returned are sorted, with the
most probable pose first.
Parameters
----------
center_mass : (3,) float
The object center of mass (if None, this method
assumes uniform density and watertightness and
computes a center of mass explicitly)
sigma : float
The covariance for the multivariate gaussian used
to sample center of mass locations
n_samples : int
The number of samples of the center of mass location
threshold : float
The probability value at which to threshold
returned stable poses
Returns
-------
transforms : (n, 4, 4) float
The homogenous matrices that transform the
object to rest in a stable pose, with the
new z-axis pointing upwards from the table
and the object just touching the table.
probs : (n,) float
A probability ranging from 0.0 to 1.0 for each pose
### Response:
def compute_stable_poses(self,
center_mass=None,
sigma=0.0,
n_samples=1,
threshold=0.0):
"""
Computes stable orientations of a mesh and their quasi-static probabilities.
This method samples the location of the center of mass from a multivariate
gaussian (mean at com, cov equal to identity times sigma) over n_samples.
For each sample, it computes the stable resting poses of the mesh on
a planar workspace and evaluates the probabilities of landing in
each pose if the object is dropped onto the table randomly.
This method returns the 4x4 homogenous transform matrices that place
the shape against the planar surface with the z-axis pointing upwards
and a list of the probabilities for each pose.
The transforms and probabilities that are returned are sorted, with the
most probable pose first.
Parameters
----------
center_mass : (3,) float
The object center of mass (if None, this method
assumes uniform density and watertightness and
computes a center of mass explicitly)
sigma : float
The covariance for the multivariate gaussian used
to sample center of mass locations
n_samples : int
The number of samples of the center of mass location
threshold : float
The probability value at which to threshold
returned stable poses
Returns
-------
transforms : (n, 4, 4) float
The homogenous matrices that transform the
object to rest in a stable pose, with the
new z-axis pointing upwards from the table
and the object just touching the table.
probs : (n,) float
A probability ranging from 0.0 to 1.0 for each pose
"""
return poses.compute_stable_poses(mesh=self,
center_mass=center_mass,
sigma=sigma,
n_samples=n_samples,
threshold=threshold) |
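A hedged usage sketch: assuming the method above lives on a trimesh-style mesh object (the call into poses.compute_stable_poses suggests this, but it is not confirmed here), resting poses could be queried as follows. The STL path is a placeholder.
import trimesh  # assumed dependency
mesh = trimesh.load("model.stl")  # placeholder file name
transforms, probs = mesh.compute_stable_poses(n_samples=10, threshold=0.01)
best_pose = transforms[0]  # poses are sorted, most probable first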
def transform(self, data, centers=None):
""" Find entries of all hypercubes. If `centers=None`, then use `self.centers_` as computed in `self.fit`.
Empty hypercubes are removed from the result
Parameters
===========
data: array-like
Data to find in entries in cube. Warning: first column must be index column.
centers: list of array-like
Center points for all cubes as returned by `self.fit`. Default is to use `self.centers_`.
Returns
=========
hypercubes: list of array-like
list of entries in each hypercube in `data`.
"""
centers = centers or self.centers_
hypercubes = [
self.transform_single(data, cube, i) for i, cube in enumerate(centers)
]
# Clean out any empty cubes (common in high dimensions)
hypercubes = [cube for cube in hypercubes if len(cube)]
return hypercubes | Find entries of all hypercubes. If `centers=None`, then use `self.centers_` as computed in `self.fit`.
Empty hypercubes are removed from the result
Parameters
===========
data: array-like
Data to find in entries in cube. Warning: first column must be index column.
centers: list of array-like
Center points for all cubes as returned by `self.fit`. Default is to use `self.centers_`.
Returns
=========
hypercubes: list of array-like
list of entries in each hypercube in `data`. | Below is the the instruction that describes the task:
### Input:
Find entries of all hypercubes. If `centers=None`, then use `self.centers_` as computed in `self.fit`.
Empty hypercubes are removed from the result
Parameters
===========
data: array-like
Data to find in entries in cube. Warning: first column must be index column.
centers: list of array-like
Center points for all cubes as returned by `self.fit`. Default is to use `self.centers_`.
Returns
=========
hypercubes: list of array-like
list of entries in each hypercube in `data`.
### Response:
def transform(self, data, centers=None):
""" Find entries of all hypercubes. If `centers=None`, then use `self.centers_` as computed in `self.fit`.
Empty hypercubes are removed from the result
Parameters
===========
data: array-like
Data to find in entries in cube. Warning: first column must be index column.
centers: list of array-like
Center points for all cubes as returned by `self.fit`. Default is to use `self.centers_`.
Returns
=========
hypercubes: list of array-like
list of entries in each hypercube in `data`.
"""
centers = centers or self.centers_
hypercubes = [
self.transform_single(data, cube, i) for i, cube in enumerate(centers)
]
# Clean out any empty cubes (common in high dimensions)
hypercubes = [cube for cube in hypercubes if len(cube)]
return hypercubes |
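A small sketch of how transform() above might be driven; `cover` stands for a fitted instance of the class (its fit() method, which populates centers_, is not shown here), and `data` is any array whose first column is an index column as the docstring warns.
# Assumes `cover` was fitted earlier so that cover.centers_ exists;
# `my_centers` is a hypothetical list of explicit center points.
hypercubes = cover.transform(data)                   # uses cover.centers_
subsets = cover.transform(data, centers=my_centers)  # or pass explicit centers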
def help(self, *args):
"""
Show help
"""
res = ''
if len(args) == 0:
res = 'Help:\n'
for name, value in inspect.getmembers(self):
if not inspect.isgeneratorfunction(value):
continue
if name.startswith('_') or (len(args) and name != args[0]) or name == 'run':
continue
doc = inspect.getdoc(value)
res += name
if len(args) and doc:
res += ': ' + doc
elif doc:
res += ': ' + doc.split('\n')[0]
res += '\n'
if len(args) == 0:
res += '\nhelp command for details about a command\n'
return res | Show help | Below is the the instruction that describes the task:
### Input:
Show help
### Response:
def help(self, *args):
"""
Show help
"""
res = ''
if len(args) == 0:
res = 'Help:\n'
for name, value in inspect.getmembers(self):
if not inspect.isgeneratorfunction(value):
continue
if name.startswith('_') or (len(args) and name != args[0]) or name == 'run':
continue
doc = inspect.getdoc(value)
res += name
if len(args) and doc:
res += ': ' + doc
elif doc:
res += ': ' + doc.split('\n')[0]
res += '\n'
if len(args) == 0:
res += '\nhelp command for details about a command\n'
return res |
def _make_spec_file(self):
"""
Customize spec file inserting %config section
"""
spec_file = setuptools.command.bdist_rpm.bdist_rpm._make_spec_file(self)
spec_file.append('%config(noreplace) /etc/lograptor/lograptor.conf')
spec_file.append('%config(noreplace) /etc/lograptor/report_template.*')
spec_file.append('%config(noreplace) /etc/lograptor/conf.d/*.conf')
return spec_file | Customize spec file inserting %config section | Below is the the instruction that describes the task:
### Input:
Customize spec file inserting %config section
### Response:
def _make_spec_file(self):
"""
Customize spec file inserting %config section
"""
spec_file = setuptools.command.bdist_rpm.bdist_rpm._make_spec_file(self)
spec_file.append('%config(noreplace) /etc/lograptor/lograptor.conf')
spec_file.append('%config(noreplace) /etc/lograptor/report_template.*')
spec_file.append('%config(noreplace) /etc/lograptor/conf.d/*.conf')
return spec_file |
def rename_command(source, destination):
"""
Executor for `globus rename`
"""
source_ep, source_path = source
dest_ep, dest_path = destination
if source_ep != dest_ep:
raise click.UsageError(
(
"rename requires that the source and dest "
"endpoints are the same, {} != {}"
).format(source_ep, dest_ep)
)
endpoint_id = source_ep
client = get_client()
autoactivate(client, endpoint_id, if_expires_in=60)
res = client.operation_rename(endpoint_id, oldpath=source_path, newpath=dest_path)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message") | Executor for `globus rename` | Below is the the instruction that describes the task:
### Input:
Executor for `globus rename`
### Response:
def rename_command(source, destination):
"""
Executor for `globus rename`
"""
source_ep, source_path = source
dest_ep, dest_path = destination
if source_ep != dest_ep:
raise click.UsageError(
(
"rename requires that the source and dest "
"endpoints are the same, {} != {}"
).format(source_ep, dest_ep)
)
endpoint_id = source_ep
client = get_client()
autoactivate(client, endpoint_id, if_expires_in=60)
res = client.operation_rename(endpoint_id, oldpath=source_path, newpath=dest_path)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message") |
def check_args(args):
"""
Parse arguments and check if the arguments are valid
"""
if not os.path.exists(args.fd):
print("Not a valid path", args.fd, file=ERROR_LOG)
return [], [], False
if args.fl is not None:
# we already ensure the file can be opened and opened the file
file_line = args.fl.readline()
amr_ids = file_line.strip().split()
elif args.f is None:
print("No AMR ID was given", file=ERROR_LOG)
return [], [], False
else:
amr_ids = args.f
names = []
check_name = True
if args.p is None:
names = get_names(args.fd, amr_ids)
# no need to check names
check_name = False
if len(names) == 0:
print("Cannot find any user who tagged these AMR", file=ERROR_LOG)
return [], [], False
else:
names = args.p
if len(names) == 0:
print("No user was given", file=ERROR_LOG)
return [], [], False
if len(names) == 1:
print("Only one user is given. Smatch calculation requires at least two users.", file=ERROR_LOG)
return [], [], False
if "consensus" in names:
con_index = names.index("consensus")
names.pop(con_index)
names.append("consensus")
# check if all the AMR_id and user combinations are valid
if check_name:
pop_name = []
for i, name in enumerate(names):
for amr in amr_ids:
amr_path = args.fd + name + "/" + amr + ".txt"
if not os.path.exists(amr_path):
print("User", name, "fails to tag AMR", amr, file=ERROR_LOG)
pop_name.append(i)
break
if len(pop_name) != 0:
pop_num = 0
for p in pop_name:
print("Deleting user", names[p - pop_num], "from the name list", file=ERROR_LOG)
names.pop(p - pop_num)
pop_num += 1
if len(names) < 2:
print("Not enough users to evaluate. Smatch requires >2 users who tag all the AMRs", file=ERROR_LOG)
return "", "", False
return amr_ids, names, True | Parse arguments and check if the arguments are valid | Below is the the instruction that describes the task:
### Input:
Parse arguments and check if the arguments are valid
### Response:
def check_args(args):
"""
Parse arguments and check if the arguments are valid
"""
if not os.path.exists(args.fd):
print("Not a valid path", args.fd, file=ERROR_LOG)
return [], [], False
if args.fl is not None:
# we already ensure the file can be opened and opened the file
file_line = args.fl.readline()
amr_ids = file_line.strip().split()
elif args.f is None:
print("No AMR ID was given", file=ERROR_LOG)
return [], [], False
else:
amr_ids = args.f
names = []
check_name = True
if args.p is None:
names = get_names(args.fd, amr_ids)
# no need to check names
check_name = False
if len(names) == 0:
print("Cannot find any user who tagged these AMR", file=ERROR_LOG)
return [], [], False
else:
names = args.p
if len(names) == 0:
print("No user was given", file=ERROR_LOG)
return [], [], False
if len(names) == 1:
print("Only one user is given. Smatch calculation requires at least two users.", file=ERROR_LOG)
return [], [], False
if "consensus" in names:
con_index = names.index("consensus")
names.pop(con_index)
names.append("consensus")
# check if all the AMR_id and user combinations are valid
if check_name:
pop_name = []
for i, name in enumerate(names):
for amr in amr_ids:
amr_path = args.fd + name + "/" + amr + ".txt"
if not os.path.exists(amr_path):
print("User", name, "fails to tag AMR", amr, file=ERROR_LOG)
pop_name.append(i)
break
if len(pop_name) != 0:
pop_num = 0
for p in pop_name:
print("Deleting user", names[p - pop_num], "from the name list", file=ERROR_LOG)
names.pop(p - pop_num)
pop_num += 1
if len(names) < 2:
print("Not enough users to evaluate. Smatch requires >2 users who tag all the AMRs", file=ERROR_LOG)
return "", "", False
return amr_ids, names, True |
def _move_files_to_compute(compute, project_id, directory, files_path):
"""
Move the files to a remote compute
"""
location = os.path.join(directory, files_path)
if os.path.exists(location):
for (dirpath, dirnames, filenames) in os.walk(location):
for filename in filenames:
path = os.path.join(dirpath, filename)
dst = os.path.relpath(path, directory)
yield from _upload_file(compute, project_id, path, dst)
shutil.rmtree(os.path.join(directory, files_path)) | Move the files to a remote compute | Below is the the instruction that describes the task:
### Input:
Move the files to a remote compute
### Response:
def _move_files_to_compute(compute, project_id, directory, files_path):
"""
Move the files to a remote compute
"""
location = os.path.join(directory, files_path)
if os.path.exists(location):
for (dirpath, dirnames, filenames) in os.walk(location):
for filename in filenames:
path = os.path.join(dirpath, filename)
dst = os.path.relpath(path, directory)
yield from _upload_file(compute, project_id, path, dst)
shutil.rmtree(os.path.join(directory, files_path)) |
def __get_metrics(self):
""" Each metric must have its own filters copy to modify it freely"""
esfilters_merge = None
esfilters_abandon = None
if self.esfilters:
esfilters_merge = self.esfilters.copy()
esfilters_abandon = self.esfilters.copy()
merged = Merged(self.es_url, self.es_index,
start=self.start, end=self.end,
esfilters=esfilters_merge, interval=self.interval)
abandoned = Abandoned(self.es_url, self.es_index,
start=self.start, end=self.end,
esfilters=esfilters_abandon, interval=self.interval)
return (merged, abandoned) | Each metric must have its own filters copy to modify it freely | Below is the the instruction that describes the task:
### Input:
Each metric must have its own filters copy to modify it freely
### Response:
def __get_metrics(self):
""" Each metric must have its own filters copy to modify it freely"""
esfilters_merge = None
esfilters_abandon = None
if self.esfilters:
esfilters_merge = self.esfilters.copy()
esfilters_abandon = self.esfilters.copy()
merged = Merged(self.es_url, self.es_index,
start=self.start, end=self.end,
esfilters=esfilters_merge, interval=self.interval)
abandoned = Abandoned(self.es_url, self.es_index,
start=self.start, end=self.end,
esfilters=esfilters_abandon, interval=self.interval)
return (merged, abandoned) |
def sorted_keywords_by_order(keywords, order):
"""Sort keywords based on defined order.
:param keywords: Keyword to be sorted.
:type keywords: dict
:param order: Ordered list of key.
:type order: list
:return: Ordered dictionary based on order list.
:rtype: OrderedDict
"""
# we need to delete item with no value
for key, value in list(keywords.items()):
if value is None:
del keywords[key]
ordered_keywords = OrderedDict()
for key in order:
if key in list(keywords.keys()):
ordered_keywords[key] = keywords.get(key)
for keyword in keywords:
if keyword not in order:
ordered_keywords[keyword] = keywords.get(keyword)
return ordered_keywords | Sort keywords based on defined order.
:param keywords: Keyword to be sorted.
:type keywords: dict
:param order: Ordered list of key.
:type order: list
:return: Ordered dictionary based on order list.
:rtype: OrderedDict | Below is the the instruction that describes the task:
### Input:
Sort keywords based on defined order.
:param keywords: Keyword to be sorted.
:type keywords: dict
:param order: Ordered list of key.
:type order: list
:return: Ordered dictionary based on order list.
:rtype: OrderedDict
### Response:
def sorted_keywords_by_order(keywords, order):
"""Sort keywords based on defined order.
:param keywords: Keyword to be sorted.
:type keywords: dict
:param order: Ordered list of key.
:type order: list
:return: Ordered dictionary based on order list.
:rtype: OrderedDict
"""
# we need to delete item with no value
for key, value in list(keywords.items()):
if value is None:
del keywords[key]
ordered_keywords = OrderedDict()
for key in order:
if key in list(keywords.keys()):
ordered_keywords[key] = keywords.get(key)
for keyword in keywords:
if keyword not in order:
ordered_keywords[keyword] = keywords.get(keyword)
return ordered_keywords |
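A worked example of the ordering behaviour, assuming sorted_keywords_by_order is importable from its module; the keyword values are made up for illustration.
keywords = {"title": "Flood", "source": None, "scale": "1:50000", "date": "2024-01-01"}
order = ["title", "date", "scale"]
ordered = sorted_keywords_by_order(keywords, order)
# OrderedDict([('title', 'Flood'), ('date', '2024-01-01'), ('scale', '1:50000')])
# 'source' is dropped because its value is None; keys missing from `order` would be appended last.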
def detectSyntax(self,
xmlFileName=None,
mimeType=None,
language=None,
sourceFilePath=None,
firstLine=None):
"""Get syntax by next parameters (fill as many, as known):
* name of XML file with syntax definition
* MIME type of source file
* Programming language name
* Source file path
* First line of source file
First parameter in the list has the highest priority.
Old syntax is always cleared, even if failed to detect new.
Method returns ``True``, if syntax is detected, and ``False`` otherwise
"""
oldLanguage = self.language()
self.clearSyntax()
syntax = self._globalSyntaxManager.getSyntax(xmlFileName=xmlFileName,
mimeType=mimeType,
languageName=language,
sourceFilePath=sourceFilePath,
firstLine=firstLine)
if syntax is not None:
self._highlighter = SyntaxHighlighter(syntax, self)
self._indenter.setSyntax(syntax)
keywords = {kw for kwList in syntax.parser.lists.values() for kw in kwList}
self._completer.setKeywords(keywords)
newLanguage = self.language()
if oldLanguage != newLanguage:
self.languageChanged.emit(newLanguage)
return syntax is not None | Get syntax by next parameters (fill as many, as known):
* name of XML file with syntax definition
* MIME type of source file
* Programming language name
* Source file path
* First line of source file
First parameter in the list has the highest priority.
Old syntax is always cleared, even if failed to detect new.
Method returns ``True``, if syntax is detected, and ``False`` otherwise | Below is the the instruction that describes the task:
### Input:
Get syntax by next parameters (fill as many, as known):
* name of XML file with syntax definition
* MIME type of source file
* Programming language name
* Source file path
* First line of source file
First parameter in the list has the highest priority.
Old syntax is always cleared, even if failed to detect new.
Method returns ``True``, if syntax is detected, and ``False`` otherwise
### Response:
def detectSyntax(self,
xmlFileName=None,
mimeType=None,
language=None,
sourceFilePath=None,
firstLine=None):
"""Get syntax by next parameters (fill as many, as known):
* name of XML file with syntax definition
* MIME type of source file
* Programming language name
* Source file path
* First line of source file
First parameter in the list has the highest priority.
Old syntax is always cleared, even if failed to detect new.
Method returns ``True``, if syntax is detected, and ``False`` otherwise
"""
oldLanguage = self.language()
self.clearSyntax()
syntax = self._globalSyntaxManager.getSyntax(xmlFileName=xmlFileName,
mimeType=mimeType,
languageName=language,
sourceFilePath=sourceFilePath,
firstLine=firstLine)
if syntax is not None:
self._highlighter = SyntaxHighlighter(syntax, self)
self._indenter.setSyntax(syntax)
keywords = {kw for kwList in syntax.parser.lists.values() for kw in kwList}
self._completer.setKeywords(keywords)
newLanguage = self.language()
if oldLanguage != newLanguage:
self.languageChanged.emit(newLanguage)
return syntax is not None |
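A sketch of the detection fallbacks in priority order; `editor` is a stand-in for the widget instance that defines detectSyntax above and is not constructed here.
# Try progressively weaker hints until one of them resolves to a syntax definition.
ok = editor.detectSyntax(sourceFilePath="script.py")
ok = ok or editor.detectSyntax(mimeType="text/x-python")
ok = ok or editor.detectSyntax(firstLine="#!/usr/bin/env python")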
def retrieve(pdb_id, cache_dir = None, strict = True, parse_ligands = False):
'''Creates a PDB object by using a cached copy of the file if it exists or by retrieving the file from the RCSB.'''
# Check to see whether we have a cached copy
pdb_id = pdb_id.upper()
if cache_dir:
filename = os.path.join(cache_dir, "%s.pdb" % pdb_id)
if os.path.exists(filename):
return PDB(read_file(filename), strict = strict, parse_ligands = parse_ligands)
# Get a copy from the RCSB
contents = rcsb.retrieve_pdb(pdb_id)
# Create a cached copy if appropriate
if cache_dir:
write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), contents)
# Return the object
return PDB(contents, strict = strict, parse_ligands = parse_ligands) | Creates a PDB object by using a cached copy of the file if it exists or by retrieving the file from the RCSB. | Below is the the instruction that describes the task:
### Input:
Creates a PDB object by using a cached copy of the file if it exists or by retrieving the file from the RCSB.
### Response:
def retrieve(pdb_id, cache_dir = None, strict = True, parse_ligands = False):
'''Creates a PDB object by using a cached copy of the file if it exists or by retrieving the file from the RCSB.'''
# Check to see whether we have a cached copy
pdb_id = pdb_id.upper()
if cache_dir:
filename = os.path.join(cache_dir, "%s.pdb" % pdb_id)
if os.path.exists(filename):
return PDB(read_file(filename), strict = strict, parse_ligands = parse_ligands)
# Get a copy from the RCSB
contents = rcsb.retrieve_pdb(pdb_id)
# Create a cached copy if appropriate
if cache_dir:
write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), contents)
# Return the object
return PDB(contents, strict = strict, parse_ligands = parse_ligands) |
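A usage sketch for retrieve() above; the PDB id and cache directory are placeholders, and the function is assumed to be importable from its module.
# First call downloads from the RCSB and writes 1UBQ.pdb into the cache directory;
# a second call with the same cache_dir reads the cached copy instead.
pdb = retrieve("1UBQ", cache_dir="/tmp/pdb_cache", parse_ligands=True)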
def _set_port_channel(self, v, load=False):
"""
Setter method for port_channel, mapped from YANG variable /protocol/spanning_tree/rpvst/port_channel (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_channel is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_channel() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=port_channel.port_channel, is_container='container', presence=False, yang_name="port-channel", rest_name="port-channel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Controls behaviour of port-channel for spanning-tree', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_channel must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=port_channel.port_channel, is_container='container', presence=False, yang_name="port-channel", rest_name="port-channel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Controls behaviour of port-channel for spanning-tree', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
})
self.__port_channel = t
if hasattr(self, '_set'):
self._set() | Setter method for port_channel, mapped from YANG variable /protocol/spanning_tree/rpvst/port_channel (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_channel is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_channel() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for port_channel, mapped from YANG variable /protocol/spanning_tree/rpvst/port_channel (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_channel is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_channel() directly.
### Response:
def _set_port_channel(self, v, load=False):
"""
Setter method for port_channel, mapped from YANG variable /protocol/spanning_tree/rpvst/port_channel (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_channel is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_channel() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=port_channel.port_channel, is_container='container', presence=False, yang_name="port-channel", rest_name="port-channel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Controls behaviour of port-channel for spanning-tree', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_channel must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=port_channel.port_channel, is_container='container', presence=False, yang_name="port-channel", rest_name="port-channel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Controls behaviour of port-channel for spanning-tree', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
})
self.__port_channel = t
if hasattr(self, '_set'):
self._set() |
def nsplit(seq, n=2):
""" Split a sequence into pieces of length n
If the length of the sequence isn't a multiple of n, the rest is discarded.
Note that nsplit will split strings into individual characters.
Examples:
>>> nsplit("aabbcc")
[("a", "a"), ("b", "b"), ("c", "c")]
>>> nsplit("aabbcc",n=3)
[("a", "a", "b"), ("b", "c", "c")]
# Note that cc is discarded
>>> nsplit("aabbcc",n=4)
[("a", "a", "b", "b")]
"""
return [xy for xy in itertools.izip(*[iter(seq)]*n)] | Split a sequence into pieces of length n
If the length of the sequence isn't a multiple of n, the rest is discarded.
Note that nsplit will split strings into individual characters.
Examples:
>>> nsplit("aabbcc")
[("a", "a"), ("b", "b"), ("c", "c")]
>>> nsplit("aabbcc",n=3)
[("a", "a", "b"), ("b", "c", "c")]
# Note that cc is discarded
>>> nsplit("aabbcc",n=4)
[("a", "a", "b", "b")] | Below is the the instruction that describes the task:
### Input:
Split a sequence into pieces of length n
If the length of the sequence isn't a multiple of n, the rest is discarded.
Note that nsplit will split strings into individual characters.
Examples:
>>> nsplit("aabbcc")
[("a", "a"), ("b", "b"), ("c", "c")]
>>> nsplit("aabbcc",n=3)
[("a", "a", "b"), ("b", "c", "c")]
# Note that cc is discarded
>>> nsplit("aabbcc",n=4)
[("a", "a", "b", "b")]
### Response:
def nsplit(seq, n=2):
""" Split a sequence into pieces of length n
If the length of the sequence isn't a multiple of n, the rest is discarded.
Note that nsplit will split strings into individual characters.
Examples:
>>> nsplit("aabbcc")
[("a", "a"), ("b", "b"), ("c", "c")]
>>> nsplit("aabbcc",n=3)
[("a", "a", "b"), ("b", "c", "c")]
# Note that cc is discarded
>>> nsplit("aabbcc",n=4)
[("a", "a", "b", "b")]
"""
return [xy for xy in itertools.izip(*[iter(seq)]*n)] |
def convert_and_compare_caffe_to_mxnet(image_url, gpu, caffe_prototxt_path, caffe_model_path,
caffe_mean, mean_diff_allowed, max_diff_allowed):
"""
Run the layer comparison on a caffe model, given its prototxt, weights and mean.
The comparison is done by inferring on a given image using both caffe and mxnet model
:param image_url: image file or url to run inference on
:param gpu: gpu to use, -1 for cpu
:param caffe_prototxt_path: path to caffe prototxt
:param caffe_model_path: path to caffe weights
:param caffe_mean: path to caffe mean file
"""
import caffe
from caffe_proto_utils import read_network_dag, process_network_proto, read_caffe_mean
from convert_model import convert_model
if isinstance(caffe_mean, str):
caffe_mean = read_caffe_mean(caffe_mean)
elif caffe_mean is None:
pass
elif len(caffe_mean) == 3:
# swap channels from Caffe BGR to RGB
caffe_mean = caffe_mean[::-1]
# get caffe root location, this is needed to run the upgrade network utility, so we only need
# to support parsing of latest caffe
caffe_root = os.path.dirname(os.path.dirname(caffe.__path__[0]))
caffe_prototxt_path = process_network_proto(caffe_root, caffe_prototxt_path)
_, layer_name_to_record, top_to_layers = read_network_dag(caffe_prototxt_path)
caffe.set_mode_cpu()
caffe_net = caffe.Net(caffe_prototxt_path, caffe_model_path, caffe.TEST)
image_dims = tuple(caffe_net.blobs['data'].shape)[2:4]
logging.info('getting image %s', image_url)
img_rgb = read_image(image_url, image_dims, caffe_mean)
img_bgr = img_rgb[:, ::-1, :, :]
caffe_net.blobs['data'].reshape(*img_bgr.shape)
caffe_net.blobs['data'].data[...] = img_bgr
_ = caffe_net.forward()
# read sym and add all outputs
sym, arg_params, aux_params, _ = convert_model(caffe_prototxt_path, caffe_model_path)
sym = sym.get_internals()
# now mxnet
if gpu < 0:
ctx = mx.cpu(0)
else:
ctx = mx.gpu(gpu)
arg_params, aux_params = _ch_dev(arg_params, aux_params, ctx)
arg_params["data"] = mx.nd.array(img_rgb, ctx)
arg_params["prob_label"] = mx.nd.empty((1,), ctx)
exe = sym.bind(ctx, arg_params, args_grad=None, grad_req="null", aux_states=aux_params)
exe.forward(is_train=False)
compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
top_to_layers, mean_diff_allowed, max_diff_allowed)
return | Run the layer comparison on a caffe model, given its prototxt, weights and mean.
The comparison is done by inferring on a given image using both caffe and mxnet model
:param image_url: image file or url to run inference on
:param gpu: gpu to use, -1 for cpu
:param caffe_prototxt_path: path to caffe prototxt
:param caffe_model_path: path to caffe weights
:param caffe_mean: path to caffe mean file | Below is the the instruction that describes the task:
### Input:
Run the layer comparison on a caffe model, given its prototxt, weights and mean.
The comparison is done by inferring on a given image using both caffe and mxnet model
:param image_url: image file or url to run inference on
:param gpu: gpu to use, -1 for cpu
:param caffe_prototxt_path: path to caffe prototxt
:param caffe_model_path: path to caffe weights
:param caffe_mean: path to caffe mean file
### Response:
def convert_and_compare_caffe_to_mxnet(image_url, gpu, caffe_prototxt_path, caffe_model_path,
caffe_mean, mean_diff_allowed, max_diff_allowed):
"""
Run the layer comparison on a caffe model, given its prototxt, weights and mean.
The comparison is done by inferring on a given image using both caffe and mxnet model
:param image_url: image file or url to run inference on
:param gpu: gpu to use, -1 for cpu
:param caffe_prototxt_path: path to caffe prototxt
:param caffe_model_path: path to caffe weights
:param caffe_mean: path to caffe mean file
"""
import caffe
from caffe_proto_utils import read_network_dag, process_network_proto, read_caffe_mean
from convert_model import convert_model
if isinstance(caffe_mean, str):
caffe_mean = read_caffe_mean(caffe_mean)
elif caffe_mean is None:
pass
elif len(caffe_mean) == 3:
# swap channels from Caffe BGR to RGB
caffe_mean = caffe_mean[::-1]
# get caffe root location, this is needed to run the upgrade network utility, so we only need
# to support parsing of latest caffe
caffe_root = os.path.dirname(os.path.dirname(caffe.__path__[0]))
caffe_prototxt_path = process_network_proto(caffe_root, caffe_prototxt_path)
_, layer_name_to_record, top_to_layers = read_network_dag(caffe_prototxt_path)
caffe.set_mode_cpu()
caffe_net = caffe.Net(caffe_prototxt_path, caffe_model_path, caffe.TEST)
image_dims = tuple(caffe_net.blobs['data'].shape)[2:4]
logging.info('getting image %s', image_url)
img_rgb = read_image(image_url, image_dims, caffe_mean)
img_bgr = img_rgb[:, ::-1, :, :]
caffe_net.blobs['data'].reshape(*img_bgr.shape)
caffe_net.blobs['data'].data[...] = img_bgr
_ = caffe_net.forward()
# read sym and add all outputs
sym, arg_params, aux_params, _ = convert_model(caffe_prototxt_path, caffe_model_path)
sym = sym.get_internals()
# now mxnet
if gpu < 0:
ctx = mx.cpu(0)
else:
ctx = mx.gpu(gpu)
arg_params, aux_params = _ch_dev(arg_params, aux_params, ctx)
arg_params["data"] = mx.nd.array(img_rgb, ctx)
arg_params["prob_label"] = mx.nd.empty((1,), ctx)
exe = sym.bind(ctx, arg_params, args_grad=None, grad_req="null", aux_states=aux_params)
exe.forward(is_train=False)
compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
top_to_layers, mean_diff_allowed, max_diff_allowed)
return |
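A call sketch with placeholder paths; it assumes caffe and mxnet are installed and that the mean is given per channel in Caffe's BGR order (which the channel swap above expects).
convert_and_compare_caffe_to_mxnet(
    image_url="cat.jpg",                    # local file or URL (placeholder)
    gpu=-1,                                 # -1 selects CPU
    caffe_prototxt_path="deploy.prototxt",  # placeholder paths
    caffe_model_path="weights.caffemodel",
    caffe_mean=(104.0, 117.0, 123.0),
    mean_diff_allowed=1e-3,                 # illustrative tolerances
    max_diff_allowed=1e-1,
)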
def speziale_grun(v, v0, gamma0, q0, q1):
"""
calculate Gruneisen parameter for the Speziale equation
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param q0: logarithmic derivative of Gruneisen parameter
:param q1: logarithmic derivative of Gruneisen parameter
:return: Gruneisen parameter
"""
if isuncertainties([v, v0, gamma0, q0, q1]):
gamma = gamma0 * unp.exp(q0 / q1 * ((v / v0) ** q1 - 1.))
else:
gamma = gamma0 * np.exp(q0 / q1 * ((v / v0) ** q1 - 1.))
return gamma | calculate Gruneisen parameter for the Speziale equation
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param q0: logarithmic derivative of Gruneisen parameter
:param q1: logarithmic derivative of Gruneisen parameter
:return: Gruneisen parameter | Below is the the instruction that describes the task:
### Input:
calculate Gruneisen parameter for the Speziale equation
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param q0: logarithmic derivative of Gruneisen parameter
:param q1: logarithmic derivative of Gruneisen parameter
:return: Gruneisen parameter
### Response:
def speziale_grun(v, v0, gamma0, q0, q1):
"""
calculate Gruneisen parameter for the Speziale equation
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param q0: logarithmic derivative of Gruneisen parameter
:param q1: logarithmic derivative of Gruneisen parameter
:return: Gruneisen parameter
"""
if isuncertainties([v, v0, gamma0, q0, q1]):
gamma = gamma0 * unp.exp(q0 / q1 * ((v / v0) ** q1 - 1.))
else:
gamma = gamma0 * np.exp(q0 / q1 * ((v / v0) ** q1 - 1.))
return gamma |
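A worked call with plain floats, which takes the numpy branch above; the parameter values are illustrative only and do not correspond to any particular material.
gamma = speziale_grun(v=70.0, v0=74.7, gamma0=1.5, q0=1.7, q1=5.5)
# gamma0 * exp(q0/q1 * ((v/v0)**q1 - 1)) evaluates to roughly 1.37 for these inputs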
def insert_many(ol,eles,locs,**kwargs):
'''
from elist.elist import *
ol = [1,2,3,4,5]
eles = [7,77,777]
locs = [0,2,4]
id(ol)
new = insert_many(ol,eles,locs)
ol
new
id(new)
####
ol = [1,2,3,4,5]
eles = [7,77,777]
locs = [0,2,4]
id(ol)
rslt = insert_many(ol,eles,locs,mode="original")
ol
rslt
id(rslt)
'''
if('mode' in kwargs):
mode = kwargs["mode"]
else:
mode = "new"
eles = copy.deepcopy(eles)
locs = copy.deepcopy(locs)
new = []
length = ol.__len__()
cpol = copy.deepcopy(ol)
for i in range(0,locs.__len__()):
if(locs[i]>=length):
pass
else:
locs[i] = uniform_index(locs[i],length)
tmp = sorted_refer_to(eles,locs)
eles = tmp['list']
locs = tmp['referer']
label = eles.__len__()
si = 0
ei = 0
for i in range(0,locs.__len__()):
if(locs[i]>=length):
label = i
break
else:
ei = locs[i]
new.extend(cpol[si:ei])
new.append(eles[i])
si = ei
for i in range(label,locs.__len__()):
new.append(eles[i])
new.extend(cpol[ei:])
if(mode == "new"):
return(new)
else:
ol.clear()
ol.extend(new)
return(ol) | from elist.elist import *
ol = [1,2,3,4,5]
eles = [7,77,777]
locs = [0,2,4]
id(ol)
new = insert_many(ol,eles,locs)
ol
new
id(new)
####
ol = [1,2,3,4,5]
eles = [7,77,777]
locs = [0,2,4]
id(ol)
rslt = insert_many(ol,eles,locs,mode="original")
ol
rslt
id(rslt) | Below is the the instruction that describes the task:
### Input:
from elist.elist import *
ol = [1,2,3,4,5]
eles = [7,77,777]
locs = [0,2,4]
id(ol)
new = insert_many(ol,eles,locs)
ol
new
id(new)
####
ol = [1,2,3,4,5]
eles = [7,77,777]
locs = [0,2,4]
id(ol)
rslt = insert_many(ol,eles,locs,mode="original")
ol
rslt
id(rslt)
### Response:
def insert_many(ol,eles,locs,**kwargs):
'''
from elist.elist import *
ol = [1,2,3,4,5]
eles = [7,77,777]
locs = [0,2,4]
id(ol)
new = insert_many(ol,eles,locs)
ol
new
id(new)
####
ol = [1,2,3,4,5]
eles = [7,77,777]
locs = [0,2,4]
id(ol)
rslt = insert_many(ol,eles,locs,mode="original")
ol
rslt
id(rslt)
'''
if('mode' in kwargs):
mode = kwargs["mode"]
else:
mode = "new"
eles = copy.deepcopy(eles)
locs = copy.deepcopy(locs)
new = []
length = ol.__len__()
cpol = copy.deepcopy(ol)
for i in range(0,locs.__len__()):
if(locs[i]>=length):
pass
else:
locs[i] = uniform_index(locs[i],length)
tmp = sorted_refer_to(eles,locs)
eles = tmp['list']
locs = tmp['referer']
label = eles.__len__()
si = 0
ei = 0
for i in range(0,locs.__len__()):
if(locs[i]>=length):
label = i
break
else:
ei = locs[i]
new.extend(cpol[si:ei])
new.append(eles[i])
si = ei
for i in range(label,locs.__len__()):
new.append(eles[i])
new.extend(cpol[ei:])
if(mode == "new"):
return(new)
else:
ol.clear()
ol.extend(new)
return(ol) |
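
The value insert_many returns for the docstring example is [7, 1, 2, 77, 3, 4, 777, 5]. As a cross-check, the same result can be reproduced with the standard list.insert, provided the inserts are applied from the largest original index downward so earlier inserts do not shift the later targets (a sketch, not part of elist):

    ol = [1, 2, 3, 4, 5]
    for loc, ele in sorted(zip([0, 2, 4], [7, 77, 777]), reverse=True):
        ol.insert(loc, ele)
    print(ol)  # [7, 1, 2, 77, 3, 4, 777, 5]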
def enable_spi(self):
"""Set this to `True` to enable the hardware SPI interface. If set to
`False` the hardware interface will be disabled and its pins (MISO,
MOSI, SCK and SS) can be used as GPIOs.
"""
config = self._interface_configuration(CONFIG_QUERY)
return config == CONFIG_SPI_GPIO or config == CONFIG_SPI_I2C | Set this to `True` to enable the hardware SPI interface. If set to
`False` the hardware interface will be disabled and its pins (MISO,
MOSI, SCK and SS) can be used as GPIOs. | Below is the instruction that describes the task:
### Input:
Set this to `True` to enable the hardware SPI interface. If set to
`False` the hardware interface will be disabled and its pins (MISO,
MOSI, SCK and SS) can be used as GPIOs.
### Response:
def enable_spi(self):
"""Set this to `True` to enable the hardware SPI interface. If set to
`False` the hardware interface will be disabled and its pins (MISO,
MOSI, SCK and SS) can be used as GPIOs.
"""
config = self._interface_configuration(CONFIG_QUERY)
return config == CONFIG_SPI_GPIO or config == CONFIG_SPI_I2C |
def state(self, state=vanilla.message.NoState):
"""
Returns a `State`_ `Pair`_.
*state* if supplied sets the intial state.
"""
return vanilla.message.State(self, state=state) | Returns a `State`_ `Pair`_.
*state* if supplied sets the initial state. | Below is the instruction that describes the task:
### Input:
Returns a `State`_ `Pair`_.
*state* if supplied sets the initial state.
### Response:
def state(self, state=vanilla.message.NoState):
"""
Returns a `State`_ `Pair`_.
*state* if supplied sets the initial state.
"""
return vanilla.message.State(self, state=state) |
def load(self, *args, **kwargs):
"""Load the required datasets from the multiple scenes."""
self._generate_scene_func(self._scenes, 'load', False, *args, **kwargs) | Load the required datasets from the multiple scenes. | Below is the the instruction that describes the task:
### Input:
Load the required datasets from the multiple scenes.
### Response:
def load(self, *args, **kwargs):
"""Load the required datasets from the multiple scenes."""
self._generate_scene_func(self._scenes, 'load', False, *args, **kwargs) |
def add_device(self, device_id):
""" Method for `Add device to collection <https://m2x.att.com/developer/documentation/v2/collections#Add-device-to-collection>`_ endpoint.
:param device_id: ID of the Device being added to Collection
:type device_id: str
:raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
"""
path = self.subpath('/devices/{device_id}'.format(device_id=device_id))
return self.api.put(path) | Method for `Add device to collection <https://m2x.att.com/developer/documentation/v2/collections#Add-device-to-collection>`_ endpoint.
:param device_id: ID of the Device being added to Collection
:type device_id: str
:raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request | Below is the instruction that describes the task:
### Input:
Method for `Add device to collection <https://m2x.att.com/developer/documentation/v2/collections#Add-device-to-collection>`_ endpoint.
:param device_id: ID of the Device being added to Collection
:type device_id: str
:raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
### Response:
def add_device(self, device_id):
""" Method for `Add device to collection <https://m2x.att.com/developer/documentation/v2/collections#Add-device-to-collection>`_ endpoint.
:param device_id: ID of the Device being added to Collection
:type device_id: str
:raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
"""
path = self.subpath('/devices/{device_id}'.format(device_id=device_id))
return self.api.put(path) |
def get_secure_cookie_key_version(
self, name: str, value: str = None
) -> Optional[int]:
"""Returns the signing key version of the secure cookie.
The version is returned as int.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
if value is None:
return None
return get_signature_key_version(value) | Returns the signing key version of the secure cookie.
The version is returned as int. | Below is the instruction that describes the task:
### Input:
Returns the signing key version of the secure cookie.
The version is returned as int.
### Response:
def get_secure_cookie_key_version(
self, name: str, value: str = None
) -> Optional[int]:
"""Returns the signing key version of the secure cookie.
The version is returned as int.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
if value is None:
return None
return get_signature_key_version(value) |
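
A hedged sketch of calling this method from inside a Tornado request handler; the handler class and cookie name below are hypothetical and assume a cookie_secret is configured on the application.

    import tornado.web

    class KeyVersionHandler(tornado.web.RequestHandler):
        def get(self):
            # returns None when the "session" cookie is absent
            version = self.get_secure_cookie_key_version("session")
            self.write({"key_version": version})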
def write_script(self):
"""
Write the workflow to a script (.sh instead of .dag).
Assuming that parents were added to the DAG before their children,
dependencies should be handled correctly.
"""
if not self.__dag_file_path:
raise CondorDAGError, "No path for DAG file"
try:
dfp = self.__dag_file_path
outfilename = ".".join(dfp.split(".")[:-1]) + ".sh"
outfile = open(outfilename, "w")
except:
raise CondorDAGError, "Cannot open file " + self.__dag_file_path
for node in self.__nodes:
outfile.write("# Job %s\n" % node.get_name())
# Check if this is a DAGMAN Node
if isinstance(node,CondorDAGManNode):
outfile.write("condor_submit_dag %s\n\n" % (node.job().get_dag()))
else:
outfile.write("%s %s\n\n" % (node.job().get_executable(),
node.get_cmd_line()))
outfile.close()
os.chmod(outfilename, os.stat(outfilename)[0] | stat.S_IEXEC) | Write the workflow to a script (.sh instead of .dag).
Assuming that parents were added to the DAG before their children,
dependencies should be handled correctly. | Below is the instruction that describes the task:
### Input:
Write the workflow to a script (.sh instead of .dag).
Assuming that parents were added to the DAG before their children,
dependencies should be handled correctly.
### Response:
def write_script(self):
"""
Write the workflow to a script (.sh instead of .dag).
Assuming that parents were added to the DAG before their children,
dependencies should be handled correctly.
"""
if not self.__dag_file_path:
raise CondorDAGError, "No path for DAG file"
try:
dfp = self.__dag_file_path
outfilename = ".".join(dfp.split(".")[:-1]) + ".sh"
outfile = open(outfilename, "w")
except:
raise CondorDAGError, "Cannot open file " + self.__dag_file_path
for node in self.__nodes:
outfile.write("# Job %s\n" % node.get_name())
# Check if this is a DAGMAN Node
if isinstance(node,CondorDAGManNode):
outfile.write("condor_submit_dag %s\n\n" % (node.job().get_dag()))
else:
outfile.write("%s %s\n\n" % (node.job().get_executable(),
node.get_cmd_line()))
outfile.close()
os.chmod(outfilename, os.stat(outfilename)[0] | stat.S_IEXEC) |
def long_to_bytes(n, blocksize=0):
"""long_to_bytes(n:long, blocksize:int) : string
Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front of the
byte string with binary zeros so that the length is a multiple of
blocksize.
"""
# after much testing, this algorithm was deemed to be the fastest
s = b''
if USING_PYTHON2:
n = long(n) # noqa
pack = struct.pack
while n > 0:
s = pack(b'>I', n & 0xffffffff) + s
n = n >> 32
# strip off leading zeros
for i in range(len(s)):
if s[i] != b'\000'[0]:
break
else:
# only happens when n == 0
s = b'\000'
i = 0
s = s[i:]
# add back some pad bytes. this could be done more efficiently w.r.t. the
# de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * b'\000' + s
return s | long_to_bytes(n:long, blocksize:int) : string
Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front of the
byte string with binary zeros so that the length is a multiple of
blocksize. | Below is the instruction that describes the task:
### Input:
long_to_bytes(n:long, blocksize:int) : string
Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front of the
byte string with binary zeros so that the length is a multiple of
blocksize.
### Response:
def long_to_bytes(n, blocksize=0):
"""long_to_bytes(n:long, blocksize:int) : string
Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front of the
byte string with binary zeros so that the length is a multiple of
blocksize.
"""
# after much testing, this algorithm was deemed to be the fastest
s = b''
if USING_PYTHON2:
n = long(n) # noqa
pack = struct.pack
while n > 0:
s = pack(b'>I', n & 0xffffffff) + s
n = n >> 32
# strip off leading zeros
for i in range(len(s)):
if s[i] != b'\000'[0]:
break
else:
# only happens when n == 0
s = b'\000'
i = 0
s = s[i:]
# add back some pad bytes. this could be done more efficiently w.r.t. the
# de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * b'\000' + s
return s |
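
A quick round-trip check, assuming long_to_bytes above is importable; int.from_bytes recovers the original value and blocksize pads the front with zero bytes.

    n = 2 ** 70 + 42
    b = long_to_bytes(n, blocksize=16)
    print(len(b))                           # 16 (9 significant bytes, front-padded)
    print(int.from_bytes(b, "big") == n)    # True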
def delete_metadata_at_key(self, key):
"""
::
DELETE /:login/machines/:id/metadata/:key
:param key: identifier for metadata entry
:type key: :py:class:`basestring`
:Returns: current metadata
:rtype: :py:class:`dict`
Deletes the machine metadata contained at 'key'. Also explicitly
requests and returns the machine metadata so that the local copy stays
synchronized.
"""
j, r = self.datacenter.request('DELETE', self.path + '/metadata/' +
key)
r.raise_for_status()
return self.get_metadata() | ::
DELETE /:login/machines/:id/metadata/:key
:param key: identifier for metadata entry
:type key: :py:class:`basestring`
:Returns: current metadata
:rtype: :py:class:`dict`
Deletes the machine metadata contained at 'key'. Also explicitly
requests and returns the machine metadata so that the local copy stays
synchronized. | Below is the instruction that describes the task:
### Input:
::
DELETE /:login/machines/:id/metadata/:key
:param key: identifier for metadata entry
:type key: :py:class:`basestring`
:Returns: current metadata
:rtype: :py:class:`dict`
Deletes the machine metadata contained at 'key'. Also explicitly
requests and returns the machine metadata so that the local copy stays
synchronized.
### Response:
def delete_metadata_at_key(self, key):
"""
::
DELETE /:login/machines/:id/metadata/:key
:param key: identifier for metadata entry
:type key: :py:class:`basestring`
:Returns: current metadata
:rtype: :py:class:`dict`
Deletes the machine metadata contained at 'key'. Also explicitly
requests and returns the machine metadata so that the local copy stays
synchronized.
"""
j, r = self.datacenter.request('DELETE', self.path + '/metadata/' +
key)
r.raise_for_status()
return self.get_metadata() |
def to_json(self):
"""
put the object to json and remove the internal stuff
salesking schema stores the type in the title
"""
data = json.dumps(self)
out = u'{"%s":%s}' % (self.schema['title'], data)
return out | put the object to json and remove the internal stuff
salesking schema stores the type in the title | Below is the instruction that describes the task:
### Input:
put the object to json and remove the internal stuff
salesking schema stores the type in the title
### Response:
def to_json(self):
"""
put the object to json and remove the internal stuff
salesking schema stores the type in the title
"""
data = json.dumps(self)
out = u'{"%s":%s}' % (self.schema['title'], data)
return out |
def insertion_sort(arr, simulation=False):
""" Insertion Sort
Complexity: O(n^2)
"""
iteration = 0
if simulation:
print("iteration",iteration,":",*arr)
for i in range(len(arr)):
cursor = arr[i]
pos = i
while pos > 0 and arr[pos - 1] > cursor:
# Swap the number down the list
arr[pos] = arr[pos - 1]
pos = pos - 1
# Break and do the final swap
arr[pos] = cursor
if simulation:
iteration = iteration + 1
print("iteration",iteration,":",*arr)
return arr | Insertion Sort
Complexity: O(n^2) | Below is the instruction that describes the task:
### Input:
Insertion Sort
Complexity: O(n^2)
### Response:
def insertion_sort(arr, simulation=False):
""" Insertion Sort
Complexity: O(n^2)
"""
iteration = 0
if simulation:
print("iteration",iteration,":",*arr)
for i in range(len(arr)):
cursor = arr[i]
pos = i
while pos > 0 and arr[pos - 1] > cursor:
# Swap the number down the list
arr[pos] = arr[pos - 1]
pos = pos - 1
# Break and do the final swap
arr[pos] = cursor
if simulation:
iteration = iteration + 1
print("iteration",iteration,":",*arr)
return arr |
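
A short usage sketch, assuming the function above is importable:

    print(insertion_sort([5, 2, 4, 6, 1, 3]))   # [1, 2, 3, 4, 5, 6]
    insertion_sort([3, 1, 2], simulation=True)  # also prints the array state at each iteration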
def _get_max_name_len(instances):
"""get max length of Tag:Name"""
# FIXME: ec2.instanceCollection doesn't have __len__
for i in instances:
return max([len(get_tag_value(i.tags, 'Name')) for i in instances])
return 0 | get max length of Tag:Name | Below is the the instruction that describes the task:
### Input:
get max length of Tag:Name
### Response:
def _get_max_name_len(instances):
"""get max length of Tag:Name"""
# FIXME: ec2.instanceCollection doesn't have __len__
for i in instances:
return max([len(get_tag_value(i.tags, 'Name')) for i in instances])
return 0 |
def circle_intersection(self, p: "Point2", r: Union[int, float]) -> Set["Point2"]:
""" self is point1, p is point2, r is the radius for circles originating in both points
Used in ramp finding """
assert self != p
distanceBetweenPoints = self.distance_to(p)
assert r > distanceBetweenPoints / 2
# remaining distance from center towards the intersection, using pythagoras
remainingDistanceFromCenter = (r ** 2 - (distanceBetweenPoints / 2) ** 2) ** 0.5
# center of both points
offsetToCenter = Point2(((p.x - self.x) / 2, (p.y - self.y) / 2))
center = self.offset(offsetToCenter)
# stretch offset vector in the ratio of remaining distance from center to intersection
vectorStretchFactor = remainingDistanceFromCenter / (distanceBetweenPoints / 2)
v = offsetToCenter
offsetToCenterStretched = Point2((v.x * vectorStretchFactor, v.y * vectorStretchFactor))
# rotate vector by 90° and -90°
vectorRotated1 = Point2((offsetToCenterStretched.y, -offsetToCenterStretched.x))
vectorRotated2 = Point2((-offsetToCenterStretched.y, offsetToCenterStretched.x))
intersect1 = center.offset(vectorRotated1)
intersect2 = center.offset(vectorRotated2)
return {intersect1, intersect2} | self is point1, p is point2, r is the radius for circles originating in both points
Used in ramp finding | Below is the instruction that describes the task:
### Input:
self is point1, p is point2, r is the radius for circles originating in both points
Used in ramp finding
### Response:
def circle_intersection(self, p: "Point2", r: Union[int, float]) -> Set["Point2"]:
""" self is point1, p is point2, r is the radius for circles originating in both points
Used in ramp finding """
assert self != p
distanceBetweenPoints = self.distance_to(p)
assert r > distanceBetweenPoints / 2
# remaining distance from center towards the intersection, using pythagoras
remainingDistanceFromCenter = (r ** 2 - (distanceBetweenPoints / 2) ** 2) ** 0.5
# center of both points
offsetToCenter = Point2(((p.x - self.x) / 2, (p.y - self.y) / 2))
center = self.offset(offsetToCenter)
# stretch offset vector in the ratio of remaining distance from center to intersection
vectorStretchFactor = remainingDistanceFromCenter / (distanceBetweenPoints / 2)
v = offsetToCenter
offsetToCenterStretched = Point2((v.x * vectorStretchFactor, v.y * vectorStretchFactor))
# rotate vector by 90° and -90°
vectorRotated1 = Point2((offsetToCenterStretched.y, -offsetToCenterStretched.x))
vectorRotated2 = Point2((-offsetToCenterStretched.y, offsetToCenterStretched.x))
intersect1 = center.offset(vectorRotated1)
intersect2 = center.offset(vectorRotated2)
return {intersect1, intersect2} |
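
The same construction with plain (x, y) tuples and the math module, shown as a stand-alone sketch (Point2 above belongs to the surrounding library and is not redefined here):

    import math

    def circle_intersection_xy(p1, p2, r):
        (x1, y1), (x2, y2) = p1, p2
        d = math.hypot(x2 - x1, y2 - y1)
        h = math.sqrt(r ** 2 - (d / 2) ** 2)     # midpoint-to-intersection distance
        mx, my = (x1 + x2) / 2, (y1 + y2) / 2    # midpoint between the two centers
        ux, uy = (x2 - x1) / d, (y2 - y1) / d    # unit vector from p1 towards p2
        return {(mx + h * uy, my - h * ux), (mx - h * uy, my + h * ux)}

    print(circle_intersection_xy((0, 0), (2, 0), 2))  # x = 1.0, y = +/- sqrt(3)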
def assign_dna_reads_to_protein_database(query_fasta_fp, database_fasta_fp,
output_fp, temp_dir="/tmp", params=None):
"""Assign DNA reads to a database fasta of protein sequences.
Wraps assign_reads_to_database, setting database and query types. All
parameters are set to default unless params is passed. A temporary
file must be written containing the translated sequences from the input
query fasta file because BLAT cannot do this automatically.
query_fasta_fp: absolute path to the query fasta file containing DNA
sequences.
database_fasta_fp: absolute path to the database fasta file containing
protein sequences.
output_fp: absolute path where the output file will be generated.
temp_dir: optional. Change the location where the translated sequences
will be written before being used as the query. Defaults to
/tmp.
params: optional. dict containing parameter settings to be used
instead of default values. Cannot change database or query
file types from protein and dna, respectively.
This method returns an open file object. The output format
defaults to blast9 and should be parsable by the PyCogent BLAST parsers.
"""
if params is None:
params = {}
my_params = {'-t': 'prot', '-q': 'prot'}
# make sure temp_dir specifies an absolute path
if not isabs(temp_dir):
raise ApplicationError("temp_dir must be an absolute path.")
# if the user specified parameters other than default, then use them.
# However, if they try to change the database or query types, raise an
# application error.
if '-t' in params or '-q' in params:
raise ApplicationError("Cannot change database or query types "
"when using assign_dna_reads_to_dna_database. Use "
"assign_reads_to_database instead.")
if 'genetic_code' in params:
my_genetic_code = GeneticCodes[params['genetic_code']]
del params['genetic_code']
else:
my_genetic_code = GeneticCodes[1]
my_params.update(params)
# get six-frame translation of the input DNA sequences and write them to
# temporary file.
_, tmp = mkstemp(dir=temp_dir)
tmp_out = open(tmp, 'w')
for label, sequence in parse_fasta(open(query_fasta_fp)):
seq_id = label.split()[0]
s = DNA.makeSequence(sequence)
translations = my_genetic_code.sixframes(s)
frames = [1, 2, 3, -1, -2, -3]
translations = dict(zip(frames, translations))
for frame, translation in sorted(translations.iteritems()):
entry = '>{seq_id}_frame_{frame}\n{trans}\n'
entry = entry.format(seq_id=seq_id, frame=frame, trans=translation)
tmp_out.write(entry)
tmp_out.close()
result = assign_reads_to_database(tmp, database_fasta_fp, output_fp,
params=my_params)
remove(tmp)
return result | Assign DNA reads to a database fasta of protein sequences.
Wraps assign_reads_to_database, setting database and query types. All
parameters are set to default unless params is passed. A temporary
file must be written containing the translated sequences from the input
query fasta file because BLAT cannot do this automatically.
query_fasta_fp: absolute path to the query fasta file containing DNA
sequences.
database_fasta_fp: absolute path to the database fasta file containing
protein sequences.
output_fp: absolute path where the output file will be generated.
temp_dir: optional. Change the location where the translated sequences
will be written before being used as the query. Defaults to
/tmp.
params: optional. dict containing parameter settings to be used
instead of default values. Cannot change database or query
file types from protein and dna, respectively.
This method returns an open file object. The output format
defaults to blast9 and should be parsable by the PyCogent BLAST parsers. | Below is the instruction that describes the task:
### Input:
Assign DNA reads to a database fasta of protein sequences.
Wraps assign_reads_to_database, setting database and query types. All
parameters are set to default unless params is passed. A temporary
file must be written containing the translated sequences from the input
query fasta file because BLAT cannot do this automatically.
query_fasta_fp: absolute path to the query fasta file containing DNA
sequences.
database_fasta_fp: absolute path to the database fasta file containing
protein sequences.
output_fp: absolute path where the output file will be generated.
temp_dir: optional. Change the location where the translated sequences
will be written before being used as the query. Defaults to
/tmp.
params: optional. dict containing parameter settings to be used
instead of default values. Cannot change database or query
file types from protein and dna, respectively.
This method returns an open file object. The output format
defaults to blast9 and should be parsable by the PyCogent BLAST parsers.
### Response:
def assign_dna_reads_to_protein_database(query_fasta_fp, database_fasta_fp,
output_fp, temp_dir="/tmp", params=None):
"""Assign DNA reads to a database fasta of protein sequences.
Wraps assign_reads_to_database, setting database and query types. All
parameters are set to default unless params is passed. A temporary
file must be written containing the translated sequences from the input
query fasta file because BLAT cannot do this automatically.
query_fasta_fp: absolute path to the query fasta file containing DNA
sequences.
database_fasta_fp: absolute path to the database fasta file containing
protein sequences.
output_fp: absolute path where the output file will be generated.
temp_dir: optional. Change the location where the translated sequences
will be written before being used as the query. Defaults to
/tmp.
params: optional. dict containing parameter settings to be used
instead of default values. Cannot change database or query
file types from protein and dna, respectively.
This method returns an open file object. The output format
defaults to blast9 and should be parsable by the PyCogent BLAST parsers.
"""
if params is None:
params = {}
my_params = {'-t': 'prot', '-q': 'prot'}
# make sure temp_dir specifies an absolute path
if not isabs(temp_dir):
raise ApplicationError("temp_dir must be an absolute path.")
# if the user specified parameters other than default, then use them.
# However, if they try to change the database or query types, raise an
# application error.
if '-t' in params or '-q' in params:
raise ApplicationError("Cannot change database or query types "
"when using assign_dna_reads_to_dna_database. Use "
"assign_reads_to_database instead.")
if 'genetic_code' in params:
my_genetic_code = GeneticCodes[params['genetic_code']]
del params['genetic_code']
else:
my_genetic_code = GeneticCodes[1]
my_params.update(params)
# get six-frame translation of the input DNA sequences and write them to
# temporary file.
_, tmp = mkstemp(dir=temp_dir)
tmp_out = open(tmp, 'w')
for label, sequence in parse_fasta(open(query_fasta_fp)):
seq_id = label.split()[0]
s = DNA.makeSequence(sequence)
translations = my_genetic_code.sixframes(s)
frames = [1, 2, 3, -1, -2, -3]
translations = dict(zip(frames, translations))
for frame, translation in sorted(translations.iteritems()):
entry = '>{seq_id}_frame_{frame}\n{trans}\n'
entry = entry.format(seq_id=seq_id, frame=frame, trans=translation)
tmp_out.write(entry)
tmp_out.close()
result = assign_reads_to_database(tmp, database_fasta_fp, output_fp,
params=my_params)
remove(tmp)
return result |
def login(provider_id):
"""Starts the provider login OAuth flow"""
provider = get_provider_or_404(provider_id)
callback_url = get_authorize_callback('login', provider_id)
post_login = request.form.get('next', get_post_login_redirect())
session[config_value('POST_OAUTH_LOGIN_SESSION_KEY')] = post_login
return provider.authorize(callback_url) | Starts the provider login OAuth flow | Below is the the instruction that describes the task:
### Input:
Starts the provider login OAuth flow
### Response:
def login(provider_id):
"""Starts the provider login OAuth flow"""
provider = get_provider_or_404(provider_id)
callback_url = get_authorize_callback('login', provider_id)
post_login = request.form.get('next', get_post_login_redirect())
session[config_value('POST_OAUTH_LOGIN_SESSION_KEY')] = post_login
return provider.authorize(callback_url) |
def set_package(fxn):
"""Set __package__ on the returned module.
This function is deprecated.
"""
@functools.wraps(fxn)
def set_package_wrapper(*args, **kwargs):
warnings.warn('The import system now takes care of this automatically.',
DeprecationWarning, stacklevel=2)
module = fxn(*args, **kwargs)
if getattr(module, '__package__', None) is None:
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = module.__package__.rpartition('.')[0]
return module
return set_package_wrapper | Set __package__ on the returned module.
This function is deprecated. | Below is the instruction that describes the task:
### Input:
Set __package__ on the returned module.
This function is deprecated.
### Response:
def set_package(fxn):
"""Set __package__ on the returned module.
This function is deprecated.
"""
@functools.wraps(fxn)
def set_package_wrapper(*args, **kwargs):
warnings.warn('The import system now takes care of this automatically.',
DeprecationWarning, stacklevel=2)
module = fxn(*args, **kwargs)
if getattr(module, '__package__', None) is None:
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = module.__package__.rpartition('.')[0]
return module
return set_package_wrapper |
def value(self):
"""Get tensor that the random variable corresponds to."""
if self._value is None:
try:
self._value = self.distribution.sample(self.sample_shape_tensor())
except NotImplementedError:
raise NotImplementedError(
"sample is not implemented for {0}. You must either pass in the "
"value argument or implement sample for {0}."
.format(self.distribution.__class__.__name__))
return self._value | Get tensor that the random variable corresponds to. | Below is the the instruction that describes the task:
### Input:
Get tensor that the random variable corresponds to.
### Response:
def value(self):
"""Get tensor that the random variable corresponds to."""
if self._value is None:
try:
self._value = self.distribution.sample(self.sample_shape_tensor())
except NotImplementedError:
raise NotImplementedError(
"sample is not implemented for {0}. You must either pass in the "
"value argument or implement sample for {0}."
.format(self.distribution.__class__.__name__))
return self._value |
def get_pil_mode(value, alpha=False):
"""Get PIL mode from ColorMode."""
name = {
'GRAYSCALE': 'L',
'BITMAP': '1',
'DUOTONE': 'L',
'INDEXED': 'P',
}.get(value, value)
if alpha and name in ('L', 'RGB'):
name += 'A'
return name | Get PIL mode from ColorMode. | Below is the the instruction that describes the task:
### Input:
Get PIL mode from ColorMode.
### Response:
def get_pil_mode(value, alpha=False):
"""Get PIL mode from ColorMode."""
name = {
'GRAYSCALE': 'L',
'BITMAP': '1',
'DUOTONE': 'L',
'INDEXED': 'P',
}.get(value, value)
if alpha and name in ('L', 'RGB'):
name += 'A'
return name |
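
A few example calls, assuming the function above is importable:

    print(get_pil_mode('RGB'))               # 'RGB'
    print(get_pil_mode('GRAYSCALE', True))   # 'LA'
    print(get_pil_mode('BITMAP'))            # '1'
    print(get_pil_mode('CMYK', True))        # 'CMYK' (alpha only applies to 'L' and 'RGB')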
def rearrange_pads(pads):
""" Interleave pad values to match NNabla format
(S0,S1,E0,E1) => (S0,E0,S1,E1)"""
half = len(pads)//2
starts = pads[:half]
ends = pads[half:]
return [j for i in zip(starts, ends) for j in i] | Interleave pad values to match NNabla format
(S0,S1,E0,E1) => (S0,E0,S1,E1) | Below is the instruction that describes the task:
### Input:
Interleave pad values to match NNabla format
(S0,S1,E0,E1) => (S0,E0,S1,E1)
### Response:
def rearrange_pads(pads):
""" Interleave pad values to match NNabla format
(S0,S1,E0,E1) => (S0,E0,S1,E1)"""
half = len(pads)//2
starts = pads[:half]
ends = pads[half:]
return [j for i in zip(starts, ends) for j in i] |
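
Example calls, assuming the function above is importable:

    print(rearrange_pads([1, 2, 3, 4]))         # [1, 3, 2, 4]
    print(rearrange_pads([0, 1, 2, 3, 4, 5]))   # [0, 3, 1, 4, 2, 5]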
def elasticsearch(serializer, catalog):
"""
https://www.elastic.co/guide/en/elasticsearch/reference/current/_the_search_api.html
:param serializer:
:return:
"""
search_engine_endpoint = "{0}/{1}/_search".format(SEARCH_URL, catalog.slug)
q_text = serializer.validated_data.get("q_text")
q_time = serializer.validated_data.get("q_time")
q_geo = serializer.validated_data.get("q_geo")
q_user = serializer.validated_data.get("q_user")
d_docs_sort = serializer.validated_data.get("d_docs_sort")
d_docs_limit = int(serializer.validated_data.get("d_docs_limit"))
d_docs_page = int(serializer.validated_data.get("d_docs_page"))
a_text_limit = serializer.validated_data.get("a_text_limit")
a_user_limit = serializer.validated_data.get("a_user_limit")
a_time_gap = serializer.validated_data.get("a_time_gap")
a_time_limit = serializer.validated_data.get("a_time_limit")
original_response = serializer.validated_data.get("original_response")
# Dict for search on Elastic engine
must_array = []
filter_dic = {}
aggs_dic = {}
# get ES version to make the query builder to be backward compatible with
# diffs versions.
# TODO: move this to a proper place. maybe ES client?.
# TODO: cache it to avoid overwhelm ES with this call.
# TODO: ask for ES_VERSION when building queries with an elegant way.
ES_VERSION = 2
response = requests.get(SEARCH_URL)
if response.ok:
# looks ugly but will work on normal ES response for "/".
ES_VERSION = int(response.json()["version"]["number"][0])
# String searching
if q_text:
# Wrapping query string into a query filter.
if ES_VERSION >= 2:
query_string = {
"query_string": {
"query": q_text
}
}
else:
query_string = {
"query": {
"query_string": {
"query": q_text
}
}
}
# add string searching
must_array.append(query_string)
if q_time:
# check if q_time exists
q_time = str(q_time) # check string
shortener = q_time[1:-1]
shortener = shortener.split(" TO ")
gte = shortener[0] # greater than
lte = shortener[1] # less than
layer_date = {}
if gte == '*' and lte != '*':
layer_date["lte"] = lte
range_time = {
"layer_date": layer_date
}
range_time = {"range": range_time}
must_array.append(range_time)
if gte != '*' and lte == '*':
layer_date["gte"] = gte
range_time = {
"layer_date": layer_date
}
range_time = {"range": range_time}
must_array.append(range_time)
if gte != '*' and lte != '*':
layer_date["gte"] = gte
layer_date["lte"] = lte
range_time = {
"layer_date": layer_date
}
range_time = {"range": range_time}
must_array.append(range_time)
# geo_shape searching
if q_geo:
q_geo = str(q_geo)
q_geo = q_geo[1:-1]
Ymin, Xmin = q_geo.split(" TO ")[0].split(",")
Ymax, Xmax = q_geo.split(" TO ")[1].split(",")
geoshape_query = {
"layer_geoshape": {
"shape": {
"type": "envelope",
"coordinates": [[Xmin, Ymax], [Xmax, Ymin]]
},
"relation": "intersects"
}
}
filter_dic["geo_shape"] = geoshape_query
if q_user:
# Using q_user
user_searching = {
"match": {
"layer_originator": q_user
}
}
must_array.append(user_searching)
if ES_VERSION >= 2:
dic_query = {
"query": {
"bool": {
"must": must_array,
"filter": filter_dic
}
}
}
else:
dic_query = {
"query": {
"filtered": {
"filter": {
"bool": {
"must": must_array,
"should": filter_dic
}
}
}
}
}
# Page
if d_docs_limit:
dic_query["size"] = d_docs_limit
if d_docs_page:
dic_query["from"] = d_docs_limit * d_docs_page - d_docs_limit
if d_docs_sort == "score":
dic_query["sort"] = {"_score": {"order": "desc"}}
if d_docs_sort == "time":
dic_query["sort"] = {"layer_date": {"order": "desc"}}
if d_docs_sort == "distance":
if q_geo:
# distance_x = float(((float(Xmin) - float(Xmax)) ** 2.0) ** (0.5))
# distance_y = float(((float(Ymin) - float(Ymax)) ** 2.0) ** (0.5))
msg = ("Sorting by distance is different on ElasticSearch than Solr, because this"
"feature on elastic is unavailable to geo_shape type.ElasticSearch docs said:"
"Due to the complex input structure and index representation of shapes,"
"it is not currently possible to sort shapes or retrieve their fields directly."
"The geo_shape value is only retrievable through the _source field."
" Link: https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html")
return {"error": {"msg": msg}}
else:
msg = "q_qeo MUST BE NO ZERO if you wanna sort by distance"
return {"error": {"msg": msg}}
if a_text_limit:
# getting most frequently occurring terms.
text_limit = {
"terms": {
"field": "abstract",
"size": a_text_limit
}
}
aggs_dic['popular_text'] = text_limit
if a_user_limit:
# getting most frequently occurring users.
users_limit = {
"terms": {
"field": "layer_originator",
"size": a_user_limit
}
}
aggs_dic['popular_users'] = users_limit
if a_time_limit:
# TODO: Work in progress, a_time_limit is incomplete.
# TODO: when times are * it does not work. also a a_time_gap is not required.
if q_time:
if not a_time_gap:
# getting time limit histogram.
time_limt = {
"date_range": {
"field": "layer_date",
"format": "yyyy-MM-dd'T'HH:mm:ssZ",
"ranges": [
{"from": gte, "to": lte}
]
}
}
aggs_dic['range'] = time_limt
else:
pass
else:
msg = "If you want to use a_time_limit feature, q_time MUST BE initialized"
return {"error": {"msg": msg}}
if a_time_gap:
interval = gap_to_elastic(a_time_gap)
time_gap = {
"date_histogram": {
"field": "layer_date",
"format": "yyyy-MM-dd'T'HH:mm:ssZ",
"interval": interval
}
}
aggs_dic['articles_over_time'] = time_gap
# adding aggregations to the query body
if aggs_dic:
dic_query['aggs'] = aggs_dic
try:
res = requests.post(search_engine_endpoint, data=json.dumps(dic_query))
except Exception as e:
return 500, {"error": {"msg": str(e)}}
es_response = res.json()
if original_response:
return es_response
data = {}
if 'error' in es_response:
data["error"] = es_response["error"]
return 400, data
data["request_url"] = res.url
data["request_body"] = json.dumps(dic_query)
data["a.matchDocs"] = es_response['hits']['total']
docs = []
# aggregations response: facets searching
if 'aggregations' in es_response:
aggs = es_response['aggregations']
# getting the most frequently occurring users.
if 'popular_users' in aggs:
a_users_list_array = []
users_resp = aggs["popular_users"]["buckets"]
for item in users_resp:
temp = {}
temp['count'] = item['doc_count']
temp['value'] = item['key']
a_users_list_array.append(temp)
data["a.user"] = a_users_list_array
# getting most frequently occurring words
if 'popular_text' in aggs:
a_text_list_array = []
text_resp = es_response["aggregations"]["popular_text"]["buckets"]
for item in text_resp:
temp = {}
temp['count'] = item['doc_count']
temp['value'] = item['key']
a_text_list_array.append(temp)
data["a.text"] = a_text_list_array
if 'articles_over_time' in aggs:
gap_count = []
a_gap = {}
gap_resp = aggs["articles_over_time"]["buckets"]
start = "*"
end = "*"
if len(gap_resp) > 0:
start = gap_resp[0]['key_as_string'].replace('+0000', 'z')
end = gap_resp[-1]['key_as_string'].replace('+0000', 'z')
a_gap['start'] = start
a_gap['end'] = end
a_gap['gap'] = a_time_gap
for item in gap_resp:
temp = {}
if item['doc_count'] != 0:
temp['count'] = item['doc_count']
temp['value'] = item['key_as_string'].replace('+0000', 'z')
gap_count.append(temp)
a_gap['counts'] = gap_count
data['a.time'] = a_gap
if 'range' in aggs:
# Work in progress
# Pay attention in the following code lines: Make it better!!!!
time_count = []
time_resp = aggs["range"]["buckets"]
a_time = {}
a_time['start'] = gte
a_time['end'] = lte
a_time['gap'] = None
for item in time_resp:
temp = {}
if item['doc_count'] != 0:
temp['count'] = item['doc_count']
temp['value'] = item['key'].replace('+0000', 'z')
time_count.append(temp)
a_time['counts'] = time_count
data['a.time'] = a_time
if not int(d_docs_limit) == 0:
for item in es_response['hits']['hits']:
# data
temp = item['_source']['abstract']
temp = temp.replace(u'\u201c', "\"")
temp = temp.replace(u'\u201d', "\"")
temp = temp.replace('"', "\"")
temp = temp.replace("'", "\'")
temp = temp.replace(u'\u2019', "\'")
item['_source']['abstract'] = temp
docs.append(item['_source'])
data["d.docs"] = docs
return data | https://www.elastic.co/guide/en/elasticsearch/reference/current/_the_search_api.html
:param serializer:
:return: | Below is the instruction that describes the task:
### Input:
https://www.elastic.co/guide/en/elasticsearch/reference/current/_the_search_api.html
:param serializer:
:return:
### Response:
def elasticsearch(serializer, catalog):
"""
https://www.elastic.co/guide/en/elasticsearch/reference/current/_the_search_api.html
:param serializer:
:return:
"""
search_engine_endpoint = "{0}/{1}/_search".format(SEARCH_URL, catalog.slug)
q_text = serializer.validated_data.get("q_text")
q_time = serializer.validated_data.get("q_time")
q_geo = serializer.validated_data.get("q_geo")
q_user = serializer.validated_data.get("q_user")
d_docs_sort = serializer.validated_data.get("d_docs_sort")
d_docs_limit = int(serializer.validated_data.get("d_docs_limit"))
d_docs_page = int(serializer.validated_data.get("d_docs_page"))
a_text_limit = serializer.validated_data.get("a_text_limit")
a_user_limit = serializer.validated_data.get("a_user_limit")
a_time_gap = serializer.validated_data.get("a_time_gap")
a_time_limit = serializer.validated_data.get("a_time_limit")
original_response = serializer.validated_data.get("original_response")
# Dict for search on Elastic engine
must_array = []
filter_dic = {}
aggs_dic = {}
# get ES version to make the query builder to be backward compatible with
# diffs versions.
# TODO: move this to a proper place. maybe ES client?.
# TODO: cache it to avoid overwhelm ES with this call.
# TODO: ask for ES_VERSION when building queries with an elegant way.
ES_VERSION = 2
response = requests.get(SEARCH_URL)
if response.ok:
# looks ugly but will work on normal ES response for "/".
ES_VERSION = int(response.json()["version"]["number"][0])
# String searching
if q_text:
# Wrapping query string into a query filter.
if ES_VERSION >= 2:
query_string = {
"query_string": {
"query": q_text
}
}
else:
query_string = {
"query": {
"query_string": {
"query": q_text
}
}
}
# add string searching
must_array.append(query_string)
if q_time:
# check if q_time exists
q_time = str(q_time) # check string
shortener = q_time[1:-1]
shortener = shortener.split(" TO ")
gte = shortener[0] # greater than
lte = shortener[1] # less than
layer_date = {}
if gte == '*' and lte != '*':
layer_date["lte"] = lte
range_time = {
"layer_date": layer_date
}
range_time = {"range": range_time}
must_array.append(range_time)
if gte != '*' and lte == '*':
layer_date["gte"] = gte
range_time = {
"layer_date": layer_date
}
range_time = {"range": range_time}
must_array.append(range_time)
if gte != '*' and lte != '*':
layer_date["gte"] = gte
layer_date["lte"] = lte
range_time = {
"layer_date": layer_date
}
range_time = {"range": range_time}
must_array.append(range_time)
# geo_shape searching
if q_geo:
q_geo = str(q_geo)
q_geo = q_geo[1:-1]
Ymin, Xmin = q_geo.split(" TO ")[0].split(",")
Ymax, Xmax = q_geo.split(" TO ")[1].split(",")
geoshape_query = {
"layer_geoshape": {
"shape": {
"type": "envelope",
"coordinates": [[Xmin, Ymax], [Xmax, Ymin]]
},
"relation": "intersects"
}
}
filter_dic["geo_shape"] = geoshape_query
if q_user:
# Using q_user
user_searching = {
"match": {
"layer_originator": q_user
}
}
must_array.append(user_searching)
if ES_VERSION >= 2:
dic_query = {
"query": {
"bool": {
"must": must_array,
"filter": filter_dic
}
}
}
else:
dic_query = {
"query": {
"filtered": {
"filter": {
"bool": {
"must": must_array,
"should": filter_dic
}
}
}
}
}
# Page
if d_docs_limit:
dic_query["size"] = d_docs_limit
if d_docs_page:
dic_query["from"] = d_docs_limit * d_docs_page - d_docs_limit
if d_docs_sort == "score":
dic_query["sort"] = {"_score": {"order": "desc"}}
if d_docs_sort == "time":
dic_query["sort"] = {"layer_date": {"order": "desc"}}
if d_docs_sort == "distance":
if q_geo:
# distance_x = float(((float(Xmin) - float(Xmax)) ** 2.0) ** (0.5))
# distance_y = float(((float(Ymin) - float(Ymax)) ** 2.0) ** (0.5))
msg = ("Sorting by distance is different on ElasticSearch than Solr, because this"
"feature on elastic is unavailable to geo_shape type.ElasticSearch docs said:"
"Due to the complex input structure and index representation of shapes,"
"it is not currently possible to sort shapes or retrieve their fields directly."
"The geo_shape value is only retrievable through the _source field."
" Link: https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html")
return {"error": {"msg": msg}}
else:
msg = "q_qeo MUST BE NO ZERO if you wanna sort by distance"
return {"error": {"msg": msg}}
if a_text_limit:
# getting most frequently occurring terms.
text_limit = {
"terms": {
"field": "abstract",
"size": a_text_limit
}
}
aggs_dic['popular_text'] = text_limit
if a_user_limit:
# getting most frequently occurring users.
users_limit = {
"terms": {
"field": "layer_originator",
"size": a_user_limit
}
}
aggs_dic['popular_users'] = users_limit
if a_time_limit:
# TODO: Work in progress, a_time_limit is incomplete.
# TODO: when times are * it does not work. also a a_time_gap is not required.
if q_time:
if not a_time_gap:
# getting time limit histogram.
time_limt = {
"date_range": {
"field": "layer_date",
"format": "yyyy-MM-dd'T'HH:mm:ssZ",
"ranges": [
{"from": gte, "to": lte}
]
}
}
aggs_dic['range'] = time_limt
else:
pass
else:
msg = "If you want to use a_time_limit feature, q_time MUST BE initialized"
return {"error": {"msg": msg}}
if a_time_gap:
interval = gap_to_elastic(a_time_gap)
time_gap = {
"date_histogram": {
"field": "layer_date",
"format": "yyyy-MM-dd'T'HH:mm:ssZ",
"interval": interval
}
}
aggs_dic['articles_over_time'] = time_gap
# adding aggregations to the query body
if aggs_dic:
dic_query['aggs'] = aggs_dic
try:
res = requests.post(search_engine_endpoint, data=json.dumps(dic_query))
except Exception as e:
return 500, {"error": {"msg": str(e)}}
es_response = res.json()
if original_response:
return es_response
data = {}
if 'error' in es_response:
data["error"] = es_response["error"]
return 400, data
data["request_url"] = res.url
data["request_body"] = json.dumps(dic_query)
data["a.matchDocs"] = es_response['hits']['total']
docs = []
# aggregations response: facets searching
if 'aggregations' in es_response:
aggs = es_response['aggregations']
# getting the most frequently occurring users.
if 'popular_users' in aggs:
a_users_list_array = []
users_resp = aggs["popular_users"]["buckets"]
for item in users_resp:
temp = {}
temp['count'] = item['doc_count']
temp['value'] = item['key']
a_users_list_array.append(temp)
data["a.user"] = a_users_list_array
# getting most frequently occurring words
if 'popular_text' in aggs:
a_text_list_array = []
text_resp = es_response["aggregations"]["popular_text"]["buckets"]
for item in text_resp:
temp = {}
temp['count'] = item['doc_count']
temp['value'] = item['key']
a_text_list_array.append(temp)
data["a.text"] = a_text_list_array
if 'articles_over_time' in aggs:
gap_count = []
a_gap = {}
gap_resp = aggs["articles_over_time"]["buckets"]
start = "*"
end = "*"
if len(gap_resp) > 0:
start = gap_resp[0]['key_as_string'].replace('+0000', 'z')
end = gap_resp[-1]['key_as_string'].replace('+0000', 'z')
a_gap['start'] = start
a_gap['end'] = end
a_gap['gap'] = a_time_gap
for item in gap_resp:
temp = {}
if item['doc_count'] != 0:
temp['count'] = item['doc_count']
temp['value'] = item['key_as_string'].replace('+0000', 'z')
gap_count.append(temp)
a_gap['counts'] = gap_count
data['a.time'] = a_gap
if 'range' in aggs:
# Work in progress
# Pay attention in the following code lines: Make it better!!!!
time_count = []
time_resp = aggs["range"]["buckets"]
a_time = {}
a_time['start'] = gte
a_time['end'] = lte
a_time['gap'] = None
for item in time_resp:
temp = {}
if item['doc_count'] != 0:
temp['count'] = item['doc_count']
temp['value'] = item['key'].replace('+0000', 'z')
time_count.append(temp)
a_time['counts'] = time_count
data['a.time'] = a_time
if not int(d_docs_limit) == 0:
for item in es_response['hits']['hits']:
# data
temp = item['_source']['abstract']
temp = temp.replace(u'\u201c', "\"")
temp = temp.replace(u'\u201d', "\"")
temp = temp.replace('"', "\"")
temp = temp.replace("'", "\'")
temp = temp.replace(u'\u2019', "\'")
item['_source']['abstract'] = temp
docs.append(item['_source'])
data["d.docs"] = docs
return data |
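
For reference, a sketch of the request body this function assembles for an Elasticsearch >= 2 backend when only q_text and q_geo are supplied; the values are illustrative, and the envelope coordinates stay as strings because they are split out of the q_geo parameter.

    dic_query = {
        "query": {
            "bool": {
                "must": [{"query_string": {"query": "roads"}}],
                "filter": {
                    "geo_shape": {
                        "layer_geoshape": {
                            "shape": {"type": "envelope",
                                      "coordinates": [["-180", "90"], ["180", "-90"]]},
                            "relation": "intersects",
                        }
                    }
                },
            }
        },
        "size": 10,  # d_docs_limit
        "from": 0,   # d_docs_limit * d_docs_page - d_docs_limit
    }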
def send(self, commands):
"""Send commands to LASAF through CAM-socket.
Parameters
----------
commands : list of tuples or bytes string
Commands as a list of tuples or a bytes string. cam.prefix is
always prepended before sending.
Returns
-------
int
Bytes sent.
Example
-------
::
>>> # send list of tuples
>>> cam.send([('cmd', 'enableall'), ('value', 'true')])
>>> # send bytes string
>>> cam.send(b'/cmd:enableall /value:true')
"""
self.flush() # discard any waiting messages
msg = self._prepare_send(commands)
return self.socket.send(msg) | Send commands to LASAF through CAM-socket.
Parameters
----------
commands : list of tuples or bytes string
Commands as a list of tuples or a bytes string. cam.prefix is
always prepended before sending.
Returns
-------
int
Bytes sent.
Example
-------
::
>>> # send list of tuples
>>> cam.send([('cmd', 'enableall'), ('value', 'true')])
>>> # send bytes string
>>> cam.send(b'/cmd:enableall /value:true') | Below is the instruction that describes the task:
### Input:
Send commands to LASAF through CAM-socket.
Parameters
----------
commands : list of tuples or bytes string
Commands as a list of tuples or a bytes string. cam.prefix is
always prepended before sending.
Returns
-------
int
Bytes sent.
Example
-------
::
>>> # send list of tuples
>>> cam.send([('cmd', 'enableall'), ('value', 'true')])
>>> # send bytes string
>>> cam.send(b'/cmd:enableall /value:true')
### Response:
def send(self, commands):
"""Send commands to LASAF through CAM-socket.
Parameters
----------
commands : list of tuples or bytes string
Commands as a list of tuples or a bytes string. cam.prefix is
always prepended before sending.
Returns
-------
int
Bytes sent.
Example
-------
::
>>> # send list of tuples
>>> cam.send([('cmd', 'enableall'), ('value', 'true')])
>>> # send bytes string
>>> cam.send(b'/cmd:enableall /value:true')
"""
self.flush() # discard any waiting messages
msg = self._prepare_send(commands)
return self.socket.send(msg) |
def get_provider(self):
"""Gets the ``Resource`` representing the provider.
return: (osid.resource.Resource) - the provider
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
if 'providerId' not in self._my_map or not self._my_map['providerId']:
raise errors.IllegalState('this sourceable object has no provider set')
mgr = self._get_provider_manager('RESOURCE')
lookup_session = mgr.get_resource_lookup_session() # What about the Proxy?
lookup_session.use_federated_bin_view()
return lookup_session.get_resource(self.get_provider_id()) | Gets the ``Resource`` representing the provider.
return: (osid.resource.Resource) - the provider
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the ``Resource`` representing the provider.
return: (osid.resource.Resource) - the provider
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_provider(self):
"""Gets the ``Resource`` representing the provider.
return: (osid.resource.Resource) - the provider
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
if 'providerId' not in self._my_map or not self._my_map['providerId']:
raise errors.IllegalState('this sourceable object has no provider set')
mgr = self._get_provider_manager('RESOURCE')
lookup_session = mgr.get_resource_lookup_session() # What about the Proxy?
lookup_session.use_federated_bin_view()
return lookup_session.get_resource(self.get_provider_id()) |
def cell(filename=None, mass=None, instrument=None, logging_mode="INFO",
cycle_mode=None, auto_summary=True):
"""Create a CellpyData object"""
from cellpy import log
log.setup_logging(default_level=logging_mode)
cellpy_instance = setup_cellpy_instance()
if instrument is not None:
cellpy_instance.set_instrument(instrument=instrument)
if cycle_mode is not None:
cellpy_instance.cycle_mode = cycle_mode
if filename is not None:
filename = Path(filename)
if filename.suffix in [".h5", ".hdf5", ".cellpy", ".cpy"]:
logging.info(f"Loading cellpy-file: {filename}")
cellpy_instance.load(filename)
else:
logging.info(f"Loading raw-file: {filename}")
cellpy_instance.from_raw(filename)
if mass is not None:
logging.info("Setting mass")
cellpy_instance.set_mass(mass)
if auto_summary:
logging.info("Creating step table")
cellpy_instance.make_step_table()
logging.info("Creating summary data")
cellpy_instance.make_summary()
logging.info("Created CellpyData object")
return cellpy_instance | Create a CellpyData object | Below is the the instruction that describes the task:
### Input:
Create a CellpyData object
### Response:
def cell(filename=None, mass=None, instrument=None, logging_mode="INFO",
cycle_mode=None, auto_summary=True):
"""Create a CellpyData object"""
from cellpy import log
log.setup_logging(default_level=logging_mode)
cellpy_instance = setup_cellpy_instance()
if instrument is not None:
cellpy_instance.set_instrument(instrument=instrument)
if cycle_mode is not None:
cellpy_instance.cycle_mode = cycle_mode
if filename is not None:
filename = Path(filename)
if filename.suffix in [".h5", ".hdf5", ".cellpy", ".cpy"]:
logging.info(f"Loading cellpy-file: {filename}")
cellpy_instance.load(filename)
else:
logging.info(f"Loading raw-file: {filename}")
cellpy_instance.from_raw(filename)
if mass is not None:
logging.info("Setting mass")
cellpy_instance.set_mass(mass)
if auto_summary:
logging.info("Creating step table")
cellpy_instance.make_step_table()
logging.info("Creating summary data")
cellpy_instance.make_summary()
logging.info("Created CellpyData object")
return cellpy_instance |
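
A hypothetical call, assuming cellpy is installed; the file name, mass and instrument below are placeholders, not real data.

    # raw Arbin file plus active mass in mg
    c = cell(filename="20180210_experiment001.res", mass=0.42, instrument="arbin")
    # or re-open an already processed cellpy file
    c = cell(filename="20180210_experiment001.h5")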
def lockFile(self, fileName, byteOffset, length, dokanFileInfo):
"""Lock a file.
:param fileName: name of file to lock
:type fileName: ctypes.c_wchar_p
:param byteOffset: location to start lock
:type byteOffset: ctypes.c_longlong
:param length: number of bytes to lock
:type length: ctypes.c_longlong
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
"""
return self.operations('lockFile', fileName, byteOffset, length) | Lock a file.
:param fileName: name of file to lock
:type fileName: ctypes.c_wchar_p
:param byteOffset: location to start lock
:type byteOffset: ctypes.c_longlong
:param length: number of bytes to lock
:type length: ctypes.c_longlong
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int | Below is the instruction that describes the task:
### Input:
Lock a file.
:param fileName: name of file to lock
:type fileName: ctypes.c_wchar_p
:param byteOffset: location to start lock
:type byteOffset: ctypes.c_longlong
:param length: number of bytes to lock
:type length: ctypes.c_longlong
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
### Response:
def lockFile(self, fileName, byteOffset, length, dokanFileInfo):
"""Lock a file.
:param fileName: name of file to lock
:type fileName: ctypes.c_wchar_p
:param byteOffset: location to start lock
:type byteOffset: ctypes.c_longlong
:param length: number of bytes to lock
:type length: ctypes.c_longlong
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
"""
return self.operations('lockFile', fileName, byteOffset, length) |
def unlock():
'''
Unlocks the candidate configuration.
CLI Example:
.. code-block:: bash
salt 'device_name' junos.unlock
'''
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
try:
conn.cu.unlock()
ret['message'] = "Successfully unlocked the configuration."
except jnpr.junos.exception.UnlockError as exception:
ret['message'] = \
'Could not unlock configuration due to : "{0}"'.format(exception)
ret['out'] = False
return ret | Unlocks the candidate configuration.
CLI Example:
.. code-block:: bash
salt 'device_name' junos.unlock | Below is the instruction that describes the task:
### Input:
Unlocks the candidate configuration.
CLI Example:
.. code-block:: bash
salt 'device_name' junos.unlock
### Response:
def unlock():
'''
Unlocks the candidate configuration.
CLI Example:
.. code-block:: bash
salt 'device_name' junos.unlock
'''
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
try:
conn.cu.unlock()
ret['message'] = "Successfully unlocked the configuration."
except jnpr.junos.exception.UnlockError as exception:
ret['message'] = \
'Could not unlock configuration due to : "{0}"'.format(exception)
ret['out'] = False
return ret |
def is_array(self, data_type):
'''Check if a type is a known array type
Args:
data_type (str): Name of type to check
Returns:
True if ``data_type`` is a known array type.
'''
# Split off any brackets
data_type = data_type.split('[')[0].strip()
return data_type.lower() in self.array_types | Check if a type is a known array type
Args:
data_type (str): Name of type to check
Returns:
True if ``data_type`` is a known array type. | Below is the instruction that describes the task:
### Input:
Check if a type is a known array type
Args:
data_type (str): Name of type to check
Returns:
True if ``data_type`` is a known array type.
### Response:
def is_array(self, data_type):
'''Check if a type is a known array type
Args:
data_type (str): Name of type to check
Returns:
True if ``data_type`` is a known array type.
'''
# Split off any brackets
data_type = data_type.split('[')[0].strip()
return data_type.lower() in self.array_types |