| code (stringlengths 75-104k) | docstring (stringlengths 1-46.9k) | text (stringlengths 164-112k) |
|---|---|---|
def __set_timestamp(self, clock):
"""
If "clock" is None, set the time now.
This function is called self.__init__()
"""
if clock is None:
unix_timestamp = time.mktime(
datetime.datetime.now().utctimetuple()
)
timestamp = int(unix_timestamp)
return timestamp
else:
return clock | If "clock" is None, set the time now.
This function is called self.__init__() | Below is the instruction that describes the task:
### Input:
If "clock" is None, set the time now.
This function is called self.__init__()
### Response:
def __set_timestamp(self, clock):
"""
If "clock" is None, set the time now.
This function is called self.__init__()
"""
if clock is None:
unix_timestamp = time.mktime(
datetime.datetime.now().utctimetuple()
)
timestamp = int(unix_timestamp)
return timestamp
else:
return clock |
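A minimal standalone sketch of the same logic, using only the standard library; the class wrapper around `__set_timestamp` is omitted here for clarity.

```python
import datetime
import time


def set_timestamp(clock=None):
    """Return `clock` unchanged, or the current time as an integer Unix timestamp."""
    if clock is None:
        # Mirrors the original helper: now() -> UTC time tuple -> mktime() -> int.
        return int(time.mktime(datetime.datetime.now().utctimetuple()))
    return clock


print(set_timestamp())       # e.g. 1700000000
print(set_timestamp(12345))  # 12345
```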
def unquote_filename(name, win32=(sys.platform=='win32')):
""" On Windows, remove leading and trailing quotes from filenames.
"""
if win32:
if name.startswith(("'", '"')) and name.endswith(("'", '"')):
name = name[1:-1]
return name | On Windows, remove leading and trailing quotes from filenames. | Below is the instruction that describes the task:
### Input:
On Windows, remove leading and trailing quotes from filenames.
### Response:
def unquote_filename(name, win32=(sys.platform=='win32')):
""" On Windows, remove leading and trailing quotes from filenames.
"""
if win32:
if name.startswith(("'", '"')) and name.endswith(("'", '"')):
name = name[1:-1]
return name |
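A short usage sketch of the same quote-stripping logic as a free function; `win32` is forced to `True` so the example behaves identically on any platform.

```python
import sys


def unquote_filename(name, win32=(sys.platform == 'win32')):
    """Strip one pair of surrounding quotes from a filename (Windows behaviour)."""
    if win32 and name.startswith(("'", '"')) and name.endswith(("'", '"')):
        name = name[1:-1]
    return name


print(unquote_filename('"C:\\Program Files\\app.exe"', win32=True))  # C:\Program Files\app.exe
print(unquote_filename('plain.txt', win32=True))                     # plain.txt
```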
def switch_to_line_in(self, source=None):
""" Switch the speaker's input to line-in.
Args:
source (SoCo): The speaker whose line-in should be played.
Default is line-in from the speaker itself.
"""
if source:
uid = source.uid
else:
uid = self.uid
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon-stream:{0}'.format(uid)),
('CurrentURIMetaData', '')
]) | Switch the speaker's input to line-in.
Args:
source (SoCo): The speaker whose line-in should be played.
Default is line-in from the speaker itself. | Below is the instruction that describes the task:
### Input:
Switch the speaker's input to line-in.
Args:
source (SoCo): The speaker whose line-in should be played.
Default is line-in from the speaker itself.
### Response:
def switch_to_line_in(self, source=None):
""" Switch the speaker's input to line-in.
Args:
source (SoCo): The speaker whose line-in should be played.
Default is line-in from the speaker itself.
"""
if source:
uid = source.uid
else:
uid = self.uid
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon-stream:{0}'.format(uid)),
('CurrentURIMetaData', '')
]) |
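A hedged usage sketch for the method above, assuming the `soco` package and two speakers reachable at the illustrative IP addresses below.

```python
from soco import SoCo  # assumes the soco (Sonos controller) package

living_room = SoCo('192.168.1.10')  # hypothetical speaker IP
kitchen = SoCo('192.168.1.11')      # hypothetical speaker IP

# Play the speaker's own line-in input ...
living_room.switch_to_line_in()

# ... or play the line-in of another speaker in the household.
living_room.switch_to_line_in(source=kitchen)
```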
def load(self, pkcs11dll_filename=None, *init_string):
"""
load a PKCS#11 library
:type pkcs11dll_filename: string
:param pkcs11dll_filename: the library name.
If this parameter is not set then the environment variable
`PYKCS11LIB` is used instead
:returns: a :class:`PyKCS11Lib` object
:raises: :class:`PyKCS11Error` (-1): when the load fails
"""
if pkcs11dll_filename is None:
pkcs11dll_filename = os.getenv("PYKCS11LIB")
if pkcs11dll_filename is None:
raise PyKCS11Error(-1, "No PKCS11 library specified (set PYKCS11LIB env variable)")
rv = self.lib.Load(pkcs11dll_filename)
if rv == 0:
raise PyKCS11Error(-1, pkcs11dll_filename) | load a PKCS#11 library
:type pkcs11dll_filename: string
:param pkcs11dll_filename: the library name.
If this parameter is not set then the environment variable
`PYKCS11LIB` is used instead
:returns: a :class:`PyKCS11Lib` object
:raises: :class:`PyKCS11Error` (-1): when the load fails | Below is the instruction that describes the task:
### Input:
load a PKCS#11 library
:type pkcs11dll_filename: string
:param pkcs11dll_filename: the library name.
If this parameter is not set then the environment variable
`PYKCS11LIB` is used instead
:returns: a :class:`PyKCS11Lib` object
:raises: :class:`PyKCS11Error` (-1): when the load fails
### Response:
def load(self, pkcs11dll_filename=None, *init_string):
"""
load a PKCS#11 library
:type pkcs11dll_filename: string
:param pkcs11dll_filename: the library name.
If this parameter is not set then the environment variable
`PYKCS11LIB` is used instead
:returns: a :class:`PyKCS11Lib` object
:raises: :class:`PyKCS11Error` (-1): when the load fails
"""
if pkcs11dll_filename is None:
pkcs11dll_filename = os.getenv("PYKCS11LIB")
if pkcs11dll_filename is None:
raise PyKCS11Error(-1, "No PKCS11 library specified (set PYKCS11LIB env variable)")
rv = self.lib.Load(pkcs11dll_filename)
if rv == 0:
raise PyKCS11Error(-1, pkcs11dll_filename) |
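A hedged usage sketch, assuming the `PyKCS11` package; the SoftHSM2 library path below is illustrative.

```python
import os

from PyKCS11 import PyKCS11Error, PyKCS11Lib  # assumes the PyKCS11 package

pkcs11 = PyKCS11Lib()
try:
    # Explicit path here; alternatively set PYKCS11LIB and call pkcs11.load() with no argument.
    pkcs11.load(os.getenv("PYKCS11LIB", "/usr/lib/softhsm/libsofthsm2.so"))
except PyKCS11Error as exc:
    print("Could not load the PKCS#11 library:", exc)
```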
def _read_depth_image(self):
""" Reads a depth image from the device """
# read raw uint16 buffer
im_arr = self._depth_stream.read_frame()
raw_buf = im_arr.get_buffer_as_uint16()
buf_array = np.array([raw_buf[i] for i in range(PrimesenseSensor.DEPTH_IM_WIDTH * PrimesenseSensor.DEPTH_IM_HEIGHT)])
# convert to image in meters
depth_image = buf_array.reshape(PrimesenseSensor.DEPTH_IM_HEIGHT,
PrimesenseSensor.DEPTH_IM_WIDTH)
depth_image = depth_image * MM_TO_METERS # convert to meters
if self._flip_images:
depth_image = np.flipud(depth_image)
else:
depth_image = np.fliplr(depth_image)
return DepthImage(depth_image, frame=self._frame) | Reads a depth image from the device | Below is the instruction that describes the task:
### Input:
Reads a depth image from the device
### Response:
def _read_depth_image(self):
""" Reads a depth image from the device """
# read raw uint16 buffer
im_arr = self._depth_stream.read_frame()
raw_buf = im_arr.get_buffer_as_uint16()
buf_array = np.array([raw_buf[i] for i in range(PrimesenseSensor.DEPTH_IM_WIDTH * PrimesenseSensor.DEPTH_IM_HEIGHT)])
# convert to image in meters
depth_image = buf_array.reshape(PrimesenseSensor.DEPTH_IM_HEIGHT,
PrimesenseSensor.DEPTH_IM_WIDTH)
depth_image = depth_image * MM_TO_METERS # convert to meters
if self._flip_images:
depth_image = np.flipud(depth_image)
else:
depth_image = np.fliplr(depth_image)
return DepthImage(depth_image, frame=self._frame) |
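A self-contained sketch of the buffer-to-image conversion step, with synthetic data standing in for the sensor stream; the 640x480 resolution and the mm-to-m factor are illustrative.

```python
import numpy as np

WIDTH, HEIGHT = 640, 480     # illustrative sensor resolution
MM_TO_METERS = 1.0 / 1000.0  # millimetres -> metres

# Synthetic stand-in for the raw uint16 depth buffer read from the device.
raw_buf = np.random.randint(0, 5000, size=WIDTH * HEIGHT, dtype=np.uint16)

depth_m = raw_buf.reshape(HEIGHT, WIDTH) * MM_TO_METERS  # 2-D image in metres
depth_m = np.flipud(depth_m)                             # vertical flip, as when _flip_images is set

print(depth_m.shape, depth_m.dtype)  # (480, 640) float64
```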
def _number_zero_start_handler(c, ctx):
"""Handles numeric values that start with zero or negative zero. Branches to delegate co-routines according to
_ZERO_START_TABLE.
"""
assert c == _ZERO
assert len(ctx.value) == 0 or (len(ctx.value) == 1 and ctx.value[0] == _MINUS)
ctx.set_ion_type(IonType.INT)
ctx.value.append(c)
c, _ = yield
if _ends_value(c):
trans = ctx.event_transition(IonThunkEvent, IonEventType.SCALAR, ctx.ion_type, _parse_decimal_int(ctx.value))
if c == _SLASH:
trans = ctx.immediate_transition(_number_slash_end_handler(c, ctx, trans))
yield trans
yield ctx.immediate_transition(_ZERO_START_TABLE[c](c, ctx)) | Handles numeric values that start with zero or negative zero. Branches to delegate co-routines according to
_ZERO_START_TABLE. | Below is the instruction that describes the task:
### Input:
Handles numeric values that start with zero or negative zero. Branches to delegate co-routines according to
_ZERO_START_TABLE.
### Response:
def _number_zero_start_handler(c, ctx):
"""Handles numeric values that start with zero or negative zero. Branches to delegate co-routines according to
_ZERO_START_TABLE.
"""
assert c == _ZERO
assert len(ctx.value) == 0 or (len(ctx.value) == 1 and ctx.value[0] == _MINUS)
ctx.set_ion_type(IonType.INT)
ctx.value.append(c)
c, _ = yield
if _ends_value(c):
trans = ctx.event_transition(IonThunkEvent, IonEventType.SCALAR, ctx.ion_type, _parse_decimal_int(ctx.value))
if c == _SLASH:
trans = ctx.immediate_transition(_number_slash_end_handler(c, ctx, trans))
yield trans
yield ctx.immediate_transition(_ZERO_START_TABLE[c](c, ctx)) |
def resume_sm(self, xmlstream):
"""
Resume an SM-enabled stream using the given `xmlstream`.
If the server rejects the attempt to resume stream management, a
:class:`.errors.StreamNegotiationFailure` is raised. The stream is then
in stopped state and stream management has been stopped.
.. warning::
This method cannot and does not check whether the server advertised
support for stream management. Attempting to negotiate stream
management without server support might lead to termination of the
stream.
If the XML stream dies at any point during the negotiation, the SM
state is left unchanged. If no response has been received yet, the
exception which caused the stream to die is re-raised. The state of the
stream depends on whether the main task already noticed the dead
stream.
If negotiation succeeds, this coroutine resumes the stream management
session and initiates the retransmission of any unacked stanzas. The
stream is then in running state.
.. versionchanged:: 0.11
Support for using the counter value provided some servers on a
failed resumption was added. Stanzas which are covered by the
counter will be marked as :attr:`~StanzaState.ACKED`; other stanzas
will be marked as :attr:`~StanzaState.DISCONNECTED`.
This is in contrast to the behaviour when resumption fails
*without* a counter given. In that case, stanzas which have not
been acked are marked as :attr:`~StanzaState.SENT_WITHOUT_SM`.
"""
if self.running:
raise RuntimeError("Cannot resume Stream Management while"
" StanzaStream is running")
self._start_prepare(xmlstream, self.recv_stanza)
try:
response = yield from protocol.send_and_wait_for(
xmlstream,
[
nonza.SMResume(previd=self.sm_id,
counter=self._sm_inbound_ctr)
],
[
nonza.SMResumed,
nonza.SMFailed
]
)
if isinstance(response, nonza.SMFailed):
exc = errors.StreamNegotiationFailure(
"Server rejected SM resumption"
)
if response.counter is not None:
self.sm_ack(response.counter)
self._clear_unacked(StanzaState.DISCONNECTED)
xmlstream.stanza_parser.remove_class(
nonza.SMRequest)
xmlstream.stanza_parser.remove_class(
nonza.SMAcknowledgement)
self.stop_sm()
raise exc
self._resume_sm(response.counter)
except: # NOQA
self._start_rollback(xmlstream)
raise
self._start_commit(xmlstream) | Resume an SM-enabled stream using the given `xmlstream`.
If the server rejects the attempt to resume stream management, a
:class:`.errors.StreamNegotiationFailure` is raised. The stream is then
in stopped state and stream management has been stopped.
.. warning::
This method cannot and does not check whether the server advertised
support for stream management. Attempting to negotiate stream
management without server support might lead to termination of the
stream.
If the XML stream dies at any point during the negotiation, the SM
state is left unchanged. If no response has been received yet, the
exception which caused the stream to die is re-raised. The state of the
stream depends on whether the main task already noticed the dead
stream.
If negotiation succeeds, this coroutine resumes the stream management
session and initiates the retransmission of any unacked stanzas. The
stream is then in running state.
.. versionchanged:: 0.11
Support for using the counter value provided some servers on a
failed resumption was added. Stanzas which are covered by the
counter will be marked as :attr:`~StanzaState.ACKED`; other stanzas
will be marked as :attr:`~StanzaState.DISCONNECTED`.
This is in contrast to the behaviour when resumption fails
*without* a counter given. In that case, stanzas which have not
been acked are marked as :attr:`~StanzaState.SENT_WITHOUT_SM`. | Below is the instruction that describes the task:
### Input:
Resume an SM-enabled stream using the given `xmlstream`.
If the server rejects the attempt to resume stream management, a
:class:`.errors.StreamNegotiationFailure` is raised. The stream is then
in stopped state and stream management has been stopped.
.. warning::
This method cannot and does not check whether the server advertised
support for stream management. Attempting to negotiate stream
management without server support might lead to termination of the
stream.
If the XML stream dies at any point during the negotiation, the SM
state is left unchanged. If no response has been received yet, the
exception which caused the stream to die is re-raised. The state of the
stream depends on whether the main task already noticed the dead
stream.
If negotiation succeeds, this coroutine resumes the stream management
session and initiates the retransmission of any unacked stanzas. The
stream is then in running state.
.. versionchanged:: 0.11
Support for using the counter value provided some servers on a
failed resumption was added. Stanzas which are covered by the
counter will be marked as :attr:`~StanzaState.ACKED`; other stanzas
will be marked as :attr:`~StanzaState.DISCONNECTED`.
This is in contrast to the behaviour when resumption fails
*without* a counter given. In that case, stanzas which have not
been acked are marked as :attr:`~StanzaState.SENT_WITHOUT_SM`.
### Response:
def resume_sm(self, xmlstream):
"""
Resume an SM-enabled stream using the given `xmlstream`.
If the server rejects the attempt to resume stream management, a
:class:`.errors.StreamNegotiationFailure` is raised. The stream is then
in stopped state and stream management has been stopped.
.. warning::
This method cannot and does not check whether the server advertised
support for stream management. Attempting to negotiate stream
management without server support might lead to termination of the
stream.
If the XML stream dies at any point during the negotiation, the SM
state is left unchanged. If no response has been received yet, the
exception which caused the stream to die is re-raised. The state of the
stream depends on whether the main task already noticed the dead
stream.
If negotiation succeeds, this coroutine resumes the stream management
session and initiates the retransmission of any unacked stanzas. The
stream is then in running state.
.. versionchanged:: 0.11
Support for using the counter value provided some servers on a
failed resumption was added. Stanzas which are covered by the
counter will be marked as :attr:`~StanzaState.ACKED`; other stanzas
will be marked as :attr:`~StanzaState.DISCONNECTED`.
This is in contrast to the behaviour when resumption fails
*without* a counter given. In that case, stanzas which have not
been acked are marked as :attr:`~StanzaState.SENT_WITHOUT_SM`.
"""
if self.running:
raise RuntimeError("Cannot resume Stream Management while"
" StanzaStream is running")
self._start_prepare(xmlstream, self.recv_stanza)
try:
response = yield from protocol.send_and_wait_for(
xmlstream,
[
nonza.SMResume(previd=self.sm_id,
counter=self._sm_inbound_ctr)
],
[
nonza.SMResumed,
nonza.SMFailed
]
)
if isinstance(response, nonza.SMFailed):
exc = errors.StreamNegotiationFailure(
"Server rejected SM resumption"
)
if response.counter is not None:
self.sm_ack(response.counter)
self._clear_unacked(StanzaState.DISCONNECTED)
xmlstream.stanza_parser.remove_class(
nonza.SMRequest)
xmlstream.stanza_parser.remove_class(
nonza.SMAcknowledgement)
self.stop_sm()
raise exc
self._resume_sm(response.counter)
except: # NOQA
self._start_rollback(xmlstream)
raise
self._start_commit(xmlstream) |
def isochrone(self,age,feh=0.0,minm=None,maxm=None,dm=0.02,
return_df=True,distance=None,AV=0.0):
"""
Returns stellar models at constant age and feh, for a range of masses
:param age:
log10(age) of desired isochrone.
:param feh: (optional)
Metallicity of desired isochrone (default = 0.0)
:param minm, maxm: (optional)
Mass range of desired isochrone (will default to max and min available)
:param dm: (optional)
Spacing in mass of desired isochrone. Default = 0.02 Msun.
:param return_df: (optional)
Whether to return a :class:``pandas.DataFrame`` or dictionary. Default is ``True``.
:param distance:
Distance in pc. If passed, then mags will be converted to
apparent mags based on distance (and ``AV``).
:param AV:
V-band extinction (magnitudes).
:return:
:class:`pandas.DataFrame` or dictionary containing results.
"""
if minm is None:
minm = self.minmass
if maxm is None:
maxm = self.maxmass
ms = np.arange(minm,maxm,dm)
ages = np.ones(ms.shape)*age
Ms = self.mass(ms,ages,feh)
Rs = self.radius(ms,ages,feh)
logLs = self.logL(ms,ages,feh)
loggs = self.logg(ms,ages,feh)
Teffs = self.Teff(ms,ages,feh)
mags = {band:self.mag[band](ms,ages,feh) for band in self.bands}
#for band in self.bands:
# mags[band] = self.mag[band](ms,ages)
if distance is not None:
dm = 5*np.log10(distance) - 5
for band in mags:
A = AV*EXTINCTION[band]
mags[band] = mags[band] + dm + A
props = {'M':Ms,'R':Rs,'logL':logLs,'logg':loggs,
'Teff':Teffs,'mag':mags}
if not return_df:
return props
else:
d = {}
for key in props.keys():
if key=='mag':
for m in props['mag'].keys():
d['{}_mag'.format(m)] = props['mag'][m]
else:
d[key] = props[key]
try:
df = pd.DataFrame(d)
except ValueError:
df = pd.DataFrame(d, index=[0])
return df | Returns stellar models at constant age and feh, for a range of masses
:param age:
log10(age) of desired isochrone.
:param feh: (optional)
Metallicity of desired isochrone (default = 0.0)
:param minm, maxm: (optional)
Mass range of desired isochrone (will default to max and min available)
:param dm: (optional)
Spacing in mass of desired isochrone. Default = 0.02 Msun.
:param return_df: (optional)
Whether to return a :class:``pandas.DataFrame`` or dictionary. Default is ``True``.
:param distance:
Distance in pc. If passed, then mags will be converted to
apparent mags based on distance (and ``AV``).
:param AV:
V-band extinction (magnitudes).
:return:
:class:`pandas.DataFrame` or dictionary containing results. | Below is the instruction that describes the task:
### Input:
Returns stellar models at constant age and feh, for a range of masses
:param age:
log10(age) of desired isochrone.
:param feh: (optional)
Metallicity of desired isochrone (default = 0.0)
:param minm, maxm: (optional)
Mass range of desired isochrone (will default to max and min available)
:param dm: (optional)
Spacing in mass of desired isochrone. Default = 0.02 Msun.
:param return_df: (optional)
Whether to return a :class:``pandas.DataFrame`` or dictionary. Default is ``True``.
:param distance:
Distance in pc. If passed, then mags will be converted to
apparent mags based on distance (and ``AV``).
:param AV:
V-band extinction (magnitudes).
:return:
:class:`pandas.DataFrame` or dictionary containing results.
### Response:
def isochrone(self,age,feh=0.0,minm=None,maxm=None,dm=0.02,
return_df=True,distance=None,AV=0.0):
"""
Returns stellar models at constant age and feh, for a range of masses
:param age:
log10(age) of desired isochrone.
:param feh: (optional)
Metallicity of desired isochrone (default = 0.0)
:param minm, maxm: (optional)
Mass range of desired isochrone (will default to max and min available)
:param dm: (optional)
Spacing in mass of desired isochrone. Default = 0.02 Msun.
:param return_df: (optional)
Whether to return a :class:``pandas.DataFrame`` or dictionary. Default is ``True``.
:param distance:
Distance in pc. If passed, then mags will be converted to
apparent mags based on distance (and ``AV``).
:param AV:
V-band extinction (magnitudes).
:return:
:class:`pandas.DataFrame` or dictionary containing results.
"""
if minm is None:
minm = self.minmass
if maxm is None:
maxm = self.maxmass
ms = np.arange(minm,maxm,dm)
ages = np.ones(ms.shape)*age
Ms = self.mass(ms,ages,feh)
Rs = self.radius(ms,ages,feh)
logLs = self.logL(ms,ages,feh)
loggs = self.logg(ms,ages,feh)
Teffs = self.Teff(ms,ages,feh)
mags = {band:self.mag[band](ms,ages,feh) for band in self.bands}
#for band in self.bands:
# mags[band] = self.mag[band](ms,ages)
if distance is not None:
dm = 5*np.log10(distance) - 5
for band in mags:
A = AV*EXTINCTION[band]
mags[band] = mags[band] + dm + A
props = {'M':Ms,'R':Rs,'logL':logLs,'logg':loggs,
'Teff':Teffs,'mag':mags}
if not return_df:
return props
else:
d = {}
for key in props.keys():
if key=='mag':
for m in props['mag'].keys():
d['{}_mag'.format(m)] = props['mag'][m]
else:
d[key] = props[key]
try:
df = pd.DataFrame(d)
except ValueError:
df = pd.DataFrame(d, index=[0])
return df |
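A small numpy sketch of the apparent-magnitude conversion used above (distance modulus plus extinction); all numbers are illustrative.

```python
import numpy as np

abs_mag = np.array([4.83, 5.2, 6.1])  # illustrative absolute magnitudes
distance = 150.0                      # distance in parsecs
AV = 0.1                              # V-band extinction in magnitudes
extinction_coeff = 1.0                # stand-in for EXTINCTION[band]

dist_mod = 5 * np.log10(distance) - 5            # distance modulus
app_mag = abs_mag + dist_mod + AV * extinction_coeff

print(app_mag)  # apparent magnitudes at 150 pc
```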
def div(x, y, context=None):
"""
Return ``x`` divided by ``y``.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_div,
(
BigFloat._implicit_convert(x),
BigFloat._implicit_convert(y),
),
context,
) | Return ``x`` divided by ``y``. | Below is the instruction that describes the task:
### Input:
Return ``x`` divided by ``y``.
### Response:
def div(x, y, context=None):
"""
Return ``x`` divided by ``y``.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_div,
(
BigFloat._implicit_convert(x),
BigFloat._implicit_convert(y),
),
context,
) |
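A hedged usage sketch, assuming the `bigfloat` package (MPFR bindings) that this wrapper comes from.

```python
from bigfloat import div, precision  # assumes the bigfloat package

# Divide in the current context, then again with an explicit 100-bit precision.
print(div(355, 113))
with precision(100):
    print(div(355, 113))
```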
def get(ctx):
"""Get info for current project, by project_name, or user/project_name.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
To get current project:
\b
```bash
$ polyaxon project get
```
To get a project by name
\b
```bash
$ polyaxon project get user/project
```
"""
user, project_name = get_project_or_local(ctx.obj.get('project'))
try:
response = PolyaxonClient().project.get_project(user, project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
get_project_details(response) | Get info for current project, by project_name, or user/project_name.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
To get current project:
\b
```bash
$ polyaxon project get
```
To get a project by name
\b
```bash
$ polyaxon project get user/project
``` | Below is the instruction that describes the task:
### Input:
Get info for current project, by project_name, or user/project_name.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
To get current project:
\b
```bash
$ polyaxon project get
```
To get a project by name
\b
```bash
$ polyaxon project get user/project
```
### Response:
def get(ctx):
"""Get info for current project, by project_name, or user/project_name.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
To get current project:
\b
```bash
$ polyaxon project get
```
To get a project by name
\b
```bash
$ polyaxon project get user/project
```
"""
user, project_name = get_project_or_local(ctx.obj.get('project'))
try:
response = PolyaxonClient().project.get_project(user, project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
get_project_details(response) |
def parent(self):
"""Get the parent package family.
Returns:
`PackageFamily`.
"""
family = self.repository.get_parent_package_family(self.resource)
return PackageFamily(family) if family else None | Get the parent package family.
Returns:
`PackageFamily`. | Below is the instruction that describes the task:
### Input:
Get the parent package family.
Returns:
`PackageFamily`.
### Response:
def parent(self):
"""Get the parent package family.
Returns:
`PackageFamily`.
"""
family = self.repository.get_parent_package_family(self.resource)
return PackageFamily(family) if family else None |
def interm_fluent_ordering(self) -> List[str]:
'''The list of intermediate-fluent names in canonical order.
Returns:
List[str]: A list of fluent names.
'''
interm_fluents = self.intermediate_fluents.values()
key = lambda pvar: (pvar.level, pvar.name)
return [str(pvar) for pvar in sorted(interm_fluents, key=key)] | The list of intermediate-fluent names in canonical order.
Returns:
List[str]: A list of fluent names. | Below is the instruction that describes the task:
### Input:
The list of intermediate-fluent names in canonical order.
Returns:
List[str]: A list of fluent names.
### Response:
def interm_fluent_ordering(self) -> List[str]:
'''The list of intermediate-fluent names in canonical order.
Returns:
List[str]: A list of fluent names.
'''
interm_fluents = self.intermediate_fluents.values()
key = lambda pvar: (pvar.level, pvar.name)
return [str(pvar) for pvar in sorted(interm_fluents, key=key)] |
def authenticate():
"""Authenticate via already provided configuration.
This is called once automatically per session when uploading and rendering a visualization."""
key = PyGraphistry.api_key()
#Mocks may set to True, so bypass in that case
if (key is None) and PyGraphistry._is_authenticated == False:
util.error('API key not set explicitly in `register()` or available at ' + EnvVarNames['api_key'])
if not PyGraphistry._is_authenticated:
PyGraphistry._check_key_and_version()
PyGraphistry._is_authenticated = True | Authenticate via already provided configuration.
This is called once automatically per session when uploading and rendering a visualization. | Below is the instruction that describes the task:
### Input:
Authenticate via already provided configuration.
This is called once automatically per session when uploading and rendering a visualization.
### Response:
def authenticate():
"""Authenticate via already provided configuration.
This is called once automatically per session when uploading and rendering a visualization."""
key = PyGraphistry.api_key()
#Mocks may set to True, so bypass in that case
if (key is None) and PyGraphistry._is_authenticated == False:
util.error('API key not set explicitly in `register()` or available at ' + EnvVarNames['api_key'])
if not PyGraphistry._is_authenticated:
PyGraphistry._check_key_and_version()
PyGraphistry._is_authenticated = True |
def allRoles(self, memo=None):
"""
Identify all the roles that this role is authorized to act as.
@param memo: used only for recursion. Do not pass this.
@return: an iterator of all roles that this role is a member of,
including itself.
"""
if memo is None:
memo = set()
elif self in memo:
# this is bad, but we have successfully detected and prevented the
# only really bad symptom, an infinite loop.
return
memo.add(self)
yield self
for groupRole in self.store.query(Role,
AND(RoleRelationship.member == self,
RoleRelationship.group == Role.storeID)):
for roleRole in groupRole.allRoles(memo):
yield roleRole | Identify all the roles that this role is authorized to act as.
@param memo: used only for recursion. Do not pass this.
@return: an iterator of all roles that this role is a member of,
including itself. | Below is the instruction that describes the task:
### Input:
Identify all the roles that this role is authorized to act as.
@param memo: used only for recursion. Do not pass this.
@return: an iterator of all roles that this role is a member of,
including itself.
### Response:
def allRoles(self, memo=None):
"""
Identify all the roles that this role is authorized to act as.
@param memo: used only for recursion. Do not pass this.
@return: an iterator of all roles that this role is a member of,
including itself.
"""
if memo is None:
memo = set()
elif self in memo:
# this is bad, but we have successfully detected and prevented the
# only really bad symptom, an infinite loop.
return
memo.add(self)
yield self
for groupRole in self.store.query(Role,
AND(RoleRelationship.member == self,
RoleRelationship.group == Role.storeID)):
for roleRole in groupRole.allRoles(memo):
yield roleRole |
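A generic, store-free sketch of the same cycle-safe traversal pattern: a shared `memo` set stops recursion when a role is seen twice. The small in-memory membership mapping is illustrative.

```python
def all_roles(role, membership, memo=None):
    """Yield `role` and every group it (transitively) belongs to, tolerating cycles."""
    if memo is None:
        memo = set()
    elif role in memo:
        return  # already visited: stop instead of looping forever
    memo.add(role)
    yield role
    for group in membership.get(role, ()):
        yield from all_roles(group, membership, memo)


# Illustrative membership graph containing a cycle: admins -> staff -> admins.
membership = {"alice": ["admins"], "admins": ["staff"], "staff": ["admins"]}
print(list(all_roles("alice", membership)))  # ['alice', 'admins', 'staff']
```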
def standings(date=datetime.now()):
"""Return Standings object that contains standings info
date should be a datetime object,
leave empty to get current standings
"""
data = mlbgame.info.standings(date)
return mlbgame.info.Standings(data) | Return Standings object that contains standings info
date should be a datetime object,
leave empty to get current standings | Below is the instruction that describes the task:
### Input:
Return Standings object that contains standings info
date should be a datetime object,
leave empty to get current standings
### Response:
def standings(date=datetime.now()):
"""Return Standings object that contains standings info
date should be a datetime object,
leave empty to get current standings
"""
data = mlbgame.info.standings(date)
return mlbgame.info.Standings(data) |
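A hedged usage sketch, assuming the `mlbgame` package.

```python
from datetime import datetime

import mlbgame  # assumes the mlbgame package

current = mlbgame.standings()                   # standings as of the default date
past = mlbgame.standings(datetime(2017, 6, 1))  # standings for an illustrative past date
```

Note that the `date=datetime.now()` default is evaluated once, when the module defining `standings` is imported, so the no-argument call returns standings as of import time rather than call time.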
def _reqs(self, tag):
""" Grab all the pull requests """
return [
(tag, i) for i in
self.client.get_pulls(*tag.split('/'))
] | Grab all the pull requests | Below is the instruction that describes the task:
### Input:
Grab all the pull requests
### Response:
def _reqs(self, tag):
""" Grab all the pull requests """
return [
(tag, i) for i in
self.client.get_pulls(*tag.split('/'))
] |
def audio_graph(
chunksize_bytes=DEFAULT_CHUNK_SIZE,
resample_to=SR44100(),
store_fft=False):
"""
Produce a base class suitable as a starting point for many audio processing
pipelines. This class resamples all audio to a common sampling rate, and
produces a bark band spectrogram from overlapping short-time fourier
transform frames. It also compresses the audio into ogg vorbis format for
compact storage.
"""
band = FrequencyBand(20, resample_to.nyquist)
class AudioGraph(BaseModel):
meta = JSONFeature(
MetaData,
store=True,
encoder=AudioMetaDataEncoder)
raw = ByteStreamFeature(
ByteStream,
chunksize=chunksize_bytes,
needs=meta,
store=False)
ogg = OggVorbisFeature(
OggVorbis,
needs=raw,
store=True)
pcm = AudioSamplesFeature(
AudioStream,
needs=raw,
store=False)
resampled = AudioSamplesFeature(
Resampler,
needs=pcm,
samplerate=resample_to,
store=False)
windowed = ArrayWithUnitsFeature(
SlidingWindow,
needs=resampled,
wscheme=HalfLapped(),
wfunc=OggVorbisWindowingFunc(),
store=False)
dct = ArrayWithUnitsFeature(
DCT,
needs=windowed,
store=True)
fft = ArrayWithUnitsFeature(
FFT,
needs=windowed,
store=store_fft)
bark = ArrayWithUnitsFeature(
BarkBands,
needs=fft,
frequency_band=band,
store=True)
centroid = ArrayWithUnitsFeature(
SpectralCentroid,
needs=bark,
store=True)
chroma = ArrayWithUnitsFeature(
Chroma,
needs=fft,
frequency_band=band,
store=True)
bfcc = ArrayWithUnitsFeature(
BFCC,
needs=fft,
store=True)
return AudioGraph | Produce a base class suitable as a starting point for many audio processing
pipelines. This class resamples all audio to a common sampling rate, and
produces a bark band spectrogram from overlapping short-time fourier
transform frames. It also compresses the audio into ogg vorbis format for
compact storage. | Below is the instruction that describes the task:
### Input:
Produce a base class suitable as a starting point for many audio processing
pipelines. This class resamples all audio to a common sampling rate, and
produces a bark band spectrogram from overlapping short-time fourier
transform frames. It also compresses the audio into ogg vorbis format for
compact storage.
### Response:
def audio_graph(
chunksize_bytes=DEFAULT_CHUNK_SIZE,
resample_to=SR44100(),
store_fft=False):
"""
Produce a base class suitable as a starting point for many audio processing
pipelines. This class resamples all audio to a common sampling rate, and
produces a bark band spectrogram from overlapping short-time fourier
transform frames. It also compresses the audio into ogg vorbis format for
compact storage.
"""
band = FrequencyBand(20, resample_to.nyquist)
class AudioGraph(BaseModel):
meta = JSONFeature(
MetaData,
store=True,
encoder=AudioMetaDataEncoder)
raw = ByteStreamFeature(
ByteStream,
chunksize=chunksize_bytes,
needs=meta,
store=False)
ogg = OggVorbisFeature(
OggVorbis,
needs=raw,
store=True)
pcm = AudioSamplesFeature(
AudioStream,
needs=raw,
store=False)
resampled = AudioSamplesFeature(
Resampler,
needs=pcm,
samplerate=resample_to,
store=False)
windowed = ArrayWithUnitsFeature(
SlidingWindow,
needs=resampled,
wscheme=HalfLapped(),
wfunc=OggVorbisWindowingFunc(),
store=False)
dct = ArrayWithUnitsFeature(
DCT,
needs=windowed,
store=True)
fft = ArrayWithUnitsFeature(
FFT,
needs=windowed,
store=store_fft)
bark = ArrayWithUnitsFeature(
BarkBands,
needs=fft,
frequency_band=band,
store=True)
centroid = ArrayWithUnitsFeature(
SpectralCentroid,
needs=bark,
store=True)
chroma = ArrayWithUnitsFeature(
Chroma,
needs=fft,
frequency_band=band,
store=True)
bfcc = ArrayWithUnitsFeature(
BFCC,
needs=fft,
store=True)
return AudioGraph |
def t_LESSTHAN(self, t):
r"\<"
t.endlexpos = t.lexpos + len(t.value)
return t | r"\< | Below is the instruction that describes the task:
### Input:
r"\<
### Response:
def t_LESSTHAN(self, t):
r"\<"
t.endlexpos = t.lexpos + len(t.value)
return t |
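`t_LESSTHAN` above is a PLY token rule whose regular expression lives in the docstring. A minimal, hedged module-level PLY sketch of the same idea, assuming the `ply` package:

```python
import ply.lex as lex  # assumes the ply package

tokens = ('LESSTHAN',)


def t_LESSTHAN(t):
    r"\<"
    return t


t_ignore = ' \t'


def t_error(t):
    t.lexer.skip(1)  # silently skip anything the grammar does not cover


lexer = lex.lex()
lexer.input("a < b")
print([tok.type for tok in lexer])  # ['LESSTHAN']
```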
def addressbatch(self, data, **kwargs):
'''
Send either a CSV file or data to the addressbatch API.
According to the Census, "there is currently an upper limit of 1000 records per batch file."
If a file, must have no header and fields id,street,city,state,zip
If data, should be a list of dicts with the above fields (although ID is optional)
'''
# Does data quack like a file handle?
if hasattr(data, 'read'):
return self._post_batch(f=data, **kwargs)
# Check if it's a string file
elif isinstance(data, string_types):
with open(data, 'rb') as f:
return self._post_batch(f=f, **kwargs)
else:
# Otherwise, assume a list of dicts
return self._post_batch(data=data, **kwargs) | Send either a CSV file or data to the addressbatch API.
According to the Census, "there is currently an upper limit of 1000 records per batch file."
If a file, must have no header and fields id,street,city,state,zip
If data, should be a list of dicts with the above fields (although ID is optional) | Below is the instruction that describes the task:
### Input:
Send either a CSV file or data to the addressbatch API.
According to the Census, "there is currently an upper limit of 1000 records per batch file."
If a file, must have no header and fields id,street,city,state,zip
If data, should be a list of dicts with the above fields (although ID is optional)
### Response:
def addressbatch(self, data, **kwargs):
'''
Send either a CSV file or data to the addressbatch API.
According to the Census, "there is currently an upper limit of 1000 records per batch file."
If a file, must have no header and fields id,street,city,state,zip
If data, should be a list of dicts with the above fields (although ID is optional)
'''
# Does data quack like a file handle?
if hasattr(data, 'read'):
return self._post_batch(f=data, **kwargs)
# Check if it's a string file
elif isinstance(data, string_types):
with open(data, 'rb') as f:
return self._post_batch(f=f, **kwargs)
else:
# Otherwise, assume a list of dicts
return self._post_batch(data=data, **kwargs) |
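A hedged usage sketch, assuming the `censusgeocode` package this method comes from; the file name and record are illustrative.

```python
import censusgeocode  # assumes the censusgeocode package

cg = censusgeocode.CensusGeocode()

# Either a CSV file with no header and fields id,street,city,state,zip ...
results = cg.addressbatch('addresses.csv')  # hypothetical file

# ... or a list of dicts with the same fields.
records = [{'id': '1', 'street': '1600 Pennsylvania Ave NW',
            'city': 'Washington', 'state': 'DC', 'zip': '20500'}]
results = cg.addressbatch(records)
```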
def delete_connection():
"""
Stop and destroy Bloomberg connection
"""
if _CON_SYM_ in globals():
con = globals().pop(_CON_SYM_)
if not getattr(con, '_session').start(): con.stop() | Stop and destroy Bloomberg connection | Below is the instruction that describes the task:
### Input:
Stop and destroy Bloomberg connection
### Response:
def delete_connection():
"""
Stop and destroy Bloomberg connection
"""
if _CON_SYM_ in globals():
con = globals().pop(_CON_SYM_)
if not getattr(con, '_session').start(): con.stop() |
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the Certificate object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
"""
tstream = BytearrayStream()
self.certificate_type.write(tstream, kmip_version=kmip_version)
self.certificate_value.write(tstream, kmip_version=kmip_version)
self.length = tstream.length()
super(Certificate, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer) | Write the data encoding the Certificate object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0. | Below is the instruction that describes the task:
### Input:
Write the data encoding the Certificate object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
### Response:
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the Certificate object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
"""
tstream = BytearrayStream()
self.certificate_type.write(tstream, kmip_version=kmip_version)
self.certificate_value.write(tstream, kmip_version=kmip_version)
self.length = tstream.length()
super(Certificate, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer) |
def _print_entity_intro(self, g=None, entity=None, first_time=True):
"""after a selection, prints on screen basic info about onto or entity, plus change prompt
2015-10-18: removed the sound
2016-01-18: entity is the shell wrapper around the ontospy entity
"""
if entity:
self._clear_screen()
obj = entity['object']
self._print("Loaded %s: <%s>" % (entity['type'].capitalize(), str(obj.uri)), "TIP")
self._print("----------------", "TIP")
# self._print(obj.bestDescription(), "TEXT")
if first_time:
self.prompt = _get_prompt(self.current['file'], self.currentEntity)
elif g:
self._printDescription(False)
if first_time:
self.prompt = _get_prompt(self.current['file']) | after a selection, prints on screen basic info about onto or entity, plus change prompt
2015-10-18: removed the sound
2016-01-18: entity is the shell wrapper around the ontospy entity | Below is the instruction that describes the task:
### Input:
after a selection, prints on screen basic info about onto or entity, plus change prompt
2015-10-18: removed the sound
2016-01-18: entity is the shell wrapper around the ontospy entity
### Response:
def _print_entity_intro(self, g=None, entity=None, first_time=True):
"""after a selection, prints on screen basic info about onto or entity, plus change prompt
2015-10-18: removed the sound
2016-01-18: entity is the shell wrapper around the ontospy entity
"""
if entity:
self._clear_screen()
obj = entity['object']
self._print("Loaded %s: <%s>" % (entity['type'].capitalize(), str(obj.uri)), "TIP")
self._print("----------------", "TIP")
# self._print(obj.bestDescription(), "TEXT")
if first_time:
self.prompt = _get_prompt(self.current['file'], self.currentEntity)
elif g:
self._printDescription(False)
if first_time:
self.prompt = _get_prompt(self.current['file']) |
def _variant_vc(checkpoints):
"""Add variant calling to workflow, if configured.
"""
if not checkpoints.get("vc"):
return [], []
vc_wf = [s("get_parallel_regions", "batch-split",
[["batch_rec"]],
[cwlout(["region_block"], {"type": "array", "items": "string"})],
"bcbio-vc",
disk={"files": 2.0}, cores=1),
s("variantcall_batch_region", "batch-parallel",
[["batch_rec"], ["region_block"]],
[cwlout(["vrn_file_region"], ["File", "null"], [".tbi"]),
cwlout(["region_block"], {"type": "array", "items": "string"})],
"bcbio-vc", ["bcftools", "bedtools", "freebayes=1.1.0.46",
"gatk4", "vqsr_cnn", "deepvariant;env=dv", "sentieon;env=python2",
"htslib", "octopus", "picard", "platypus-variant;env=python2", "pythonpy",
"samtools", "pysam>=0.13.0", "strelka;env=python2", "vardict", "vardict-java",
"varscan", "moreutils", "vcfanno", "vcflib", "vt", "r=3.5.1", "r-base",
"perl"],
disk={"files": 2.0}),
s("concat_batch_variantcalls", "batch-merge",
[["batch_rec"], ["region_block"], ["vrn_file_region"]],
[cwlout(["vrn_file"], "File", [".tbi"])],
"bcbio-vc", ["bcftools", "htslib", "gatk4"],
disk={"files": 1.5}, cores=1)]
if not checkpoints.get("jointvc"):
vc_wf += [s("postprocess_variants", "batch-single",
[["batch_rec"], ["vrn_file"]],
[cwlout(["vrn_file"], "File", [".tbi"])],
"bcbio-vc", ["snpeff=4.3.1t"], disk={"files": 0.5})]
vc_rec_exclude = [["align_bam"]]
if not checkpoints.get("jointvc"):
vc_rec_exclude.append(["genome_resources", "variation"])
vc_wf += [s("compare_to_rm", "batch-single",
[["batch_rec"], ["vrn_file"]],
[cwlout("vc_rec", "record",
fields=[cwlout(["batch_samples"], ["null", {"type": "array", "items": "string"}]),
cwlout(["validate", "summary"], ["File", "null"]),
cwlout(["validate", "tp"], ["File", "null"], [".tbi"]),
cwlout(["validate", "fp"], ["File", "null"], [".tbi"]),
cwlout(["validate", "fn"], ["File", "null"], [".tbi"]),
cwlout("inherit", exclude=vc_rec_exclude)])],
"bcbio-vc", ["bcftools", "bedtools", "pythonpy", "gvcf-regions;env=python2",
"htslib", "rtg-tools", "vcfanno"],
disk={"files": 1.5})]
batch_in = [["analysis"], ["genome_build"], ["align_bam"], ["vrn_file"],
["metadata", "batch"], ["metadata", "phenotype"],
["config", "algorithm", "callable_regions"], ["regions", "sample_callable"],
["config", "algorithm", "variantcaller"],
["config", "algorithm", "ensemble"],
["config", "algorithm", "vcfanno"],
["config", "algorithm", "coverage_interval"],
["config", "algorithm", "effects"],
["config", "algorithm", "min_allele_fraction"],
["config", "algorithm", "exclude_regions"],
["config", "algorithm", "variant_regions"],
["config", "algorithm", "variant_regions_merged"],
["config", "algorithm", "validate"], ["config", "algorithm", "validate_regions"],
["config", "algorithm", "tools_on"],
["config", "algorithm", "tools_off"],
["reference", "fasta", "base"],
["reference", "rtg"], ["reference", "genome_context"],
["genome_resources", "variation", "clinvar"],
["genome_resources", "variation", "cosmic"], ["genome_resources", "variation", "dbsnp"],
["genome_resources", "variation", "esp"], ["genome_resources", "variation", "exac"],
["genome_resources", "variation", "gnomad_exome"],
["genome_resources", "variation", "1000g"],
["genome_resources", "variation", "lcr"], ["genome_resources", "variation", "polyx"],
["genome_resources", "variation", "encode_blacklist"],
["genome_resources", "aliases", "ensembl"], ["genome_resources", "aliases", "human"],
["genome_resources", "aliases", "snpeff"], ["reference", "snpeff", "genome_build"]]
if checkpoints.get("umi"):
batch_in.append(["config", "algorithm", "umi_type"])
if checkpoints.get("rnaseq"):
batch_in += [["genome_resources", "variation", "editing"]]
else:
batch_in += [["genome_resources", "variation", "train_hapmap"],
["genome_resources", "variation", "train_indels"]]
vc = [s("batch_for_variantcall", "multi-batch", batch_in,
[cwlout("batch_rec", "record",
fields=[cwlout(["config", "algorithm", "variantcaller_order"], "int"),
cwlout("inherit")])],
"bcbio-vc",
disk={"files": 2.0}, cores=1,
unlist=[["config", "algorithm", "variantcaller"]], no_files=True),
w("variantcall", "multi-parallel", vc_wf,
[["region"], ["region_block"], ["vrn_file_region"], ["vrn_file"], ["validate", "summary"]])]
if checkpoints.get("jointvc"):
vc += _variant_jointvc()
if checkpoints.get("ensemble"):
vc += _variant_ensemble(checkpoints)
summarize_in = [["jointvc_rec" if checkpoints.get("jointvc") else "vc_rec"]]
if checkpoints.get("ensemble"):
summarize_in += [["ensemble_rec"]]
vc += [s("summarize_vc", "multi-combined", summarize_in,
[cwlout(["variants", "calls"], {"type": "array", "items": ["File", "null"]}),
cwlout(["variants", "gvcf"], ["null", {"type": "array", "items": ["File", "null"]}]),
cwlout(["variants", "samples"], {"type": "array", "items": {"type": "array",
"items": ["File", "null"]}}),
cwlout(["validate", "grading_summary"], ["File", "null"]),
cwlout(["validate", "grading_plots"], {"type": "array", "items": ["File", "null"]})],
"bcbio-vc",
disk={"files": 2.0}, cores=1)]
return vc, [["validate", "grading_summary"], ["variants", "calls"], ["variants", "gvcf"]] | Add variant calling to workflow, if configured. | Below is the instruction that describes the task:
### Input:
Add variant calling to workflow, if configured.
### Response:
def _variant_vc(checkpoints):
"""Add variant calling to workflow, if configured.
"""
if not checkpoints.get("vc"):
return [], []
vc_wf = [s("get_parallel_regions", "batch-split",
[["batch_rec"]],
[cwlout(["region_block"], {"type": "array", "items": "string"})],
"bcbio-vc",
disk={"files": 2.0}, cores=1),
s("variantcall_batch_region", "batch-parallel",
[["batch_rec"], ["region_block"]],
[cwlout(["vrn_file_region"], ["File", "null"], [".tbi"]),
cwlout(["region_block"], {"type": "array", "items": "string"})],
"bcbio-vc", ["bcftools", "bedtools", "freebayes=1.1.0.46",
"gatk4", "vqsr_cnn", "deepvariant;env=dv", "sentieon;env=python2",
"htslib", "octopus", "picard", "platypus-variant;env=python2", "pythonpy",
"samtools", "pysam>=0.13.0", "strelka;env=python2", "vardict", "vardict-java",
"varscan", "moreutils", "vcfanno", "vcflib", "vt", "r=3.5.1", "r-base",
"perl"],
disk={"files": 2.0}),
s("concat_batch_variantcalls", "batch-merge",
[["batch_rec"], ["region_block"], ["vrn_file_region"]],
[cwlout(["vrn_file"], "File", [".tbi"])],
"bcbio-vc", ["bcftools", "htslib", "gatk4"],
disk={"files": 1.5}, cores=1)]
if not checkpoints.get("jointvc"):
vc_wf += [s("postprocess_variants", "batch-single",
[["batch_rec"], ["vrn_file"]],
[cwlout(["vrn_file"], "File", [".tbi"])],
"bcbio-vc", ["snpeff=4.3.1t"], disk={"files": 0.5})]
vc_rec_exclude = [["align_bam"]]
if not checkpoints.get("jointvc"):
vc_rec_exclude.append(["genome_resources", "variation"])
vc_wf += [s("compare_to_rm", "batch-single",
[["batch_rec"], ["vrn_file"]],
[cwlout("vc_rec", "record",
fields=[cwlout(["batch_samples"], ["null", {"type": "array", "items": "string"}]),
cwlout(["validate", "summary"], ["File", "null"]),
cwlout(["validate", "tp"], ["File", "null"], [".tbi"]),
cwlout(["validate", "fp"], ["File", "null"], [".tbi"]),
cwlout(["validate", "fn"], ["File", "null"], [".tbi"]),
cwlout("inherit", exclude=vc_rec_exclude)])],
"bcbio-vc", ["bcftools", "bedtools", "pythonpy", "gvcf-regions;env=python2",
"htslib", "rtg-tools", "vcfanno"],
disk={"files": 1.5})]
batch_in = [["analysis"], ["genome_build"], ["align_bam"], ["vrn_file"],
["metadata", "batch"], ["metadata", "phenotype"],
["config", "algorithm", "callable_regions"], ["regions", "sample_callable"],
["config", "algorithm", "variantcaller"],
["config", "algorithm", "ensemble"],
["config", "algorithm", "vcfanno"],
["config", "algorithm", "coverage_interval"],
["config", "algorithm", "effects"],
["config", "algorithm", "min_allele_fraction"],
["config", "algorithm", "exclude_regions"],
["config", "algorithm", "variant_regions"],
["config", "algorithm", "variant_regions_merged"],
["config", "algorithm", "validate"], ["config", "algorithm", "validate_regions"],
["config", "algorithm", "tools_on"],
["config", "algorithm", "tools_off"],
["reference", "fasta", "base"],
["reference", "rtg"], ["reference", "genome_context"],
["genome_resources", "variation", "clinvar"],
["genome_resources", "variation", "cosmic"], ["genome_resources", "variation", "dbsnp"],
["genome_resources", "variation", "esp"], ["genome_resources", "variation", "exac"],
["genome_resources", "variation", "gnomad_exome"],
["genome_resources", "variation", "1000g"],
["genome_resources", "variation", "lcr"], ["genome_resources", "variation", "polyx"],
["genome_resources", "variation", "encode_blacklist"],
["genome_resources", "aliases", "ensembl"], ["genome_resources", "aliases", "human"],
["genome_resources", "aliases", "snpeff"], ["reference", "snpeff", "genome_build"]]
if checkpoints.get("umi"):
batch_in.append(["config", "algorithm", "umi_type"])
if checkpoints.get("rnaseq"):
batch_in += [["genome_resources", "variation", "editing"]]
else:
batch_in += [["genome_resources", "variation", "train_hapmap"],
["genome_resources", "variation", "train_indels"]]
vc = [s("batch_for_variantcall", "multi-batch", batch_in,
[cwlout("batch_rec", "record",
fields=[cwlout(["config", "algorithm", "variantcaller_order"], "int"),
cwlout("inherit")])],
"bcbio-vc",
disk={"files": 2.0}, cores=1,
unlist=[["config", "algorithm", "variantcaller"]], no_files=True),
w("variantcall", "multi-parallel", vc_wf,
[["region"], ["region_block"], ["vrn_file_region"], ["vrn_file"], ["validate", "summary"]])]
if checkpoints.get("jointvc"):
vc += _variant_jointvc()
if checkpoints.get("ensemble"):
vc += _variant_ensemble(checkpoints)
summarize_in = [["jointvc_rec" if checkpoints.get("jointvc") else "vc_rec"]]
if checkpoints.get("ensemble"):
summarize_in += [["ensemble_rec"]]
vc += [s("summarize_vc", "multi-combined", summarize_in,
[cwlout(["variants", "calls"], {"type": "array", "items": ["File", "null"]}),
cwlout(["variants", "gvcf"], ["null", {"type": "array", "items": ["File", "null"]}]),
cwlout(["variants", "samples"], {"type": "array", "items": {"type": "array",
"items": ["File", "null"]}}),
cwlout(["validate", "grading_summary"], ["File", "null"]),
cwlout(["validate", "grading_plots"], {"type": "array", "items": ["File", "null"]})],
"bcbio-vc",
disk={"files": 2.0}, cores=1)]
return vc, [["validate", "grading_summary"], ["variants", "calls"], ["variants", "gvcf"]] |
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if __debug__: verbose.report('backend_agg.new_figure_manager',
'debug-annoying')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig) | Create a new figure manager instance | Below is the instruction that describes the task:
### Input:
Create a new figure manager instance
### Response:
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if __debug__: verbose.report('backend_agg.new_figure_manager',
'debug-annoying')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig) |
def ahrs3_encode(self, roll, pitch, yaw, altitude, lat, lng, v1, v2, v3, v4):
'''
Status of third AHRS filter if available. This is for ANU research
group (Ali and Sean)
roll : Roll angle (rad) (float)
pitch : Pitch angle (rad) (float)
yaw : Yaw angle (rad) (float)
altitude : Altitude (MSL) (float)
lat : Latitude in degrees * 1E7 (int32_t)
lng : Longitude in degrees * 1E7 (int32_t)
v1 : test variable1 (float)
v2 : test variable2 (float)
v3 : test variable3 (float)
v4 : test variable4 (float)
'''
return MAVLink_ahrs3_message(roll, pitch, yaw, altitude, lat, lng, v1, v2, v3, v4) | Status of third AHRS filter if available. This is for ANU research
group (Ali and Sean)
roll : Roll angle (rad) (float)
pitch : Pitch angle (rad) (float)
yaw : Yaw angle (rad) (float)
altitude : Altitude (MSL) (float)
lat : Latitude in degrees * 1E7 (int32_t)
lng : Longitude in degrees * 1E7 (int32_t)
v1 : test variable1 (float)
v2 : test variable2 (float)
v3 : test variable3 (float)
v4 : test variable4 (float) | Below is the instruction that describes the task:
### Input:
Status of third AHRS filter if available. This is for ANU research
group (Ali and Sean)
roll : Roll angle (rad) (float)
pitch : Pitch angle (rad) (float)
yaw : Yaw angle (rad) (float)
altitude : Altitude (MSL) (float)
lat : Latitude in degrees * 1E7 (int32_t)
lng : Longitude in degrees * 1E7 (int32_t)
v1 : test variable1 (float)
v2 : test variable2 (float)
v3 : test variable3 (float)
v4 : test variable4 (float)
### Response:
def ahrs3_encode(self, roll, pitch, yaw, altitude, lat, lng, v1, v2, v3, v4):
'''
Status of third AHRS filter if available. This is for ANU research
group (Ali and Sean)
roll : Roll angle (rad) (float)
pitch : Pitch angle (rad) (float)
yaw : Yaw angle (rad) (float)
altitude : Altitude (MSL) (float)
lat : Latitude in degrees * 1E7 (int32_t)
lng : Longitude in degrees * 1E7 (int32_t)
v1 : test variable1 (float)
v2 : test variable2 (float)
v3 : test variable3 (float)
v4 : test variable4 (float)
'''
return MAVLink_ahrs3_message(roll, pitch, yaw, altitude, lat, lng, v1, v2, v3, v4) |
def DEFINE_bool(self, name, default, help, constant=False):
"""A helper for defining boolean options."""
self.AddOption(
type_info.Bool(name=name, default=default, description=help),
constant=constant) | A helper for defining boolean options. | Below is the instruction that describes the task:
### Input:
A helper for defining boolean options.
### Response:
def DEFINE_bool(self, name, default, help, constant=False):
"""A helper for defining boolean options."""
self.AddOption(
type_info.Bool(name=name, default=default, description=help),
constant=constant) |
def lowerbound(self, axis=0):
"""
Get the lower bound of the binning along an axis
"""
if not 0 <= axis < self.GetDimension():
raise ValueError(
"axis must be a non-negative integer less than "
"the dimensionality of the histogram")
if axis == 0:
return self.xedges(1)
if axis == 1:
return self.yedges(1)
if axis == 2:
return self.zedges(1)
raise TypeError("axis must be an integer") | Get the lower bound of the binning along an axis | Below is the instruction that describes the task:
### Input:
Get the lower bound of the binning along an axis
### Response:
def lowerbound(self, axis=0):
"""
Get the lower bound of the binning along an axis
"""
if not 0 <= axis < self.GetDimension():
raise ValueError(
"axis must be a non-negative integer less than "
"the dimensionality of the histogram")
if axis == 0:
return self.xedges(1)
if axis == 1:
return self.yedges(1)
if axis == 2:
return self.zedges(1)
raise TypeError("axis must be an integer") |
def http_error_401(self, url, fp, errcode, errmsg, headers, data=None,
retry=False):
"""Error 401 -- authentication required.
This function supports Basic authentication only."""
if 'www-authenticate' not in headers:
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
stuff = headers['www-authenticate']
match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
if not match:
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
scheme, realm = match.groups()
if scheme.lower() != 'basic':
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
if not retry:
URLopener.http_error_default(self, url, fp, errcode, errmsg,
headers)
name = 'retry_' + self.type + '_basic_auth'
if data is None:
return getattr(self,name)(url, realm)
else:
return getattr(self,name)(url, realm, data) | Error 401 -- authentication required.
This function supports Basic authentication only. | Below is the instruction that describes the task:
### Input:
Error 401 -- authentication required.
This function supports Basic authentication only.
### Response:
def http_error_401(self, url, fp, errcode, errmsg, headers, data=None,
retry=False):
"""Error 401 -- authentication required.
This function supports Basic authentication only."""
if 'www-authenticate' not in headers:
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
stuff = headers['www-authenticate']
match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
if not match:
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
scheme, realm = match.groups()
if scheme.lower() != 'basic':
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
if not retry:
URLopener.http_error_default(self, url, fp, errcode, errmsg,
headers)
name = 'retry_' + self.type + '_basic_auth'
if data is None:
return getattr(self,name)(url, realm)
else:
return getattr(self,name)(url, realm, data) |
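A standalone sketch of the header-parsing step above: extracting the scheme and realm from a `WWW-Authenticate` value with the same regular expression.

```python
import re

header = 'Basic realm="example.org"'  # illustrative WWW-Authenticate value

match = re.match(r'[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', header)
if match:
    scheme, realm = match.groups()
    print(scheme.lower(), realm)  # basic example.org
```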
def _recv_msg(self, mid_stream=False):
"""
:param mid_stream: are we receiving in a streaming operation?
:type mid_stream: boolean
"""
try:
msgbuf = self._recv_pkt()
except BadResource as e:
e.mid_stream = mid_stream
raise
except socket.timeout as e:
# A timeout can leave the socket in an inconsistent state because
# it might still receive the data later and mix up with a
# subsequent request.
# https://github.com/basho/riak-python-client/issues/425
raise BadResource(e, mid_stream)
mv = memoryview(msgbuf)
mcb = mv[0:1]
if self.bytes_required:
mcb = mcb.tobytes()
try:
msg_code, = struct.unpack("B", mcb)
except struct.error:
# NB: Python 2.7.3 requires this
# http://bugs.python.org/issue10212
msg_code, = struct.unpack("B", mv[0:1].tobytes())
self.bytes_required = True
data = mv[1:].tobytes()
return (msg_code, data) | :param mid_stream: are we receiving in a streaming operation?
:type mid_stream: boolean | Below is the the instruction that describes the task:
### Input:
:param mid_stream: are we receiving in a streaming operation?
:type mid_stream: boolean
### Response:
def _recv_msg(self, mid_stream=False):
"""
:param mid_stream: are we receiving in a streaming operation?
:type mid_stream: boolean
"""
try:
msgbuf = self._recv_pkt()
except BadResource as e:
e.mid_stream = mid_stream
raise
except socket.timeout as e:
# A timeout can leave the socket in an inconsistent state because
# it might still receive the data later and mix up with a
# subsequent request.
# https://github.com/basho/riak-python-client/issues/425
raise BadResource(e, mid_stream)
mv = memoryview(msgbuf)
mcb = mv[0:1]
if self.bytes_required:
mcb = mcb.tobytes()
try:
msg_code, = struct.unpack("B", mcb)
except struct.error:
# NB: Python 2.7.3 requires this
# http://bugs.python.org/issue10212
msg_code, = struct.unpack("B", mv[0:1].tobytes())
self.bytes_required = True
data = mv[1:].tobytes()
return (msg_code, data) |
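A minimal, self-contained sketch of the framing step inside _recv_msg above: the first byte of the received buffer is the protocol message code and the remainder is the payload. The buffer here is fabricated; _recv_pkt, BadResource and the Riak connection itself are not reproduced.

import struct

def split_msg(msgbuf):
    # First byte: message code; remainder: the (usually protobuf-encoded) payload.
    mv = memoryview(msgbuf)
    msg_code, = struct.unpack("B", mv[0:1].tobytes())
    data = mv[1:].tobytes()
    return msg_code, data

code, payload = split_msg(b"\x0ahello")  # fabricated buffer
print(code, payload)                     # -> 10 b'hello'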
def unassign_authorization_from_vault(self, authorization_id, vault_id):
"""Removes an ``Authorization`` from a ``Vault``.
arg: authorization_id (osid.id.Id): the ``Id`` of the
``Authorization``
arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault``
raise: NotFound - ``authorization_id`` or ``vault_id`` not
found or ``authorization_id`` not assigned to
``vault_id``
raise: NullArgument - ``authorization_id`` or ``vault_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin
mgr = self._get_provider_manager('AUTHORIZATION', local=True)
lookup_session = mgr.get_vault_lookup_session(proxy=self._proxy)
lookup_session.get_vault(vault_id) # to raise NotFound
self._unassign_object_from_catalog(authorization_id, vault_id) | Removes an ``Authorization`` from a ``Vault``.
arg: authorization_id (osid.id.Id): the ``Id`` of the
``Authorization``
arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault``
raise: NotFound - ``authorization_id`` or ``vault_id`` not
found or ``authorization_id`` not assigned to
``vault_id``
raise: NullArgument - ``authorization_id`` or ``vault_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Removes an ``Authorization`` from a ``Vault``.
arg: authorization_id (osid.id.Id): the ``Id`` of the
``Authorization``
arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault``
raise: NotFound - ``authorization_id`` or ``vault_id`` not
found or ``authorization_id`` not assigned to
``vault_id``
raise: NullArgument - ``authorization_id`` or ``vault_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def unassign_authorization_from_vault(self, authorization_id, vault_id):
"""Removes an ``Authorization`` from a ``Vault``.
arg: authorization_id (osid.id.Id): the ``Id`` of the
``Authorization``
arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault``
raise: NotFound - ``authorization_id`` or ``vault_id`` not
found or ``authorization_id`` not assigned to
``vault_id``
raise: NullArgument - ``authorization_id`` or ``vault_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin
mgr = self._get_provider_manager('AUTHORIZATION', local=True)
lookup_session = mgr.get_vault_lookup_session(proxy=self._proxy)
lookup_session.get_vault(vault_id) # to raise NotFound
self._unassign_object_from_catalog(authorization_id, vault_id) |
def write_loudest_events(page, bins, onsource=False):
"""
Write injection chisq plots to markup.page object page
"""
th = ['']+['Mchirp %s - %s' % tuple(bin) for bin in bins]
td = []
plots = ['BestNR','SNR']
if onsource:
trial = 'ONSOURCE'
else:
trial = 'OFFTRIAL_1'
for pTag in plots:
row = pTag.lower()
d = [pTag]
for bin in bins:
b = '%s_%s' % tuple(bin)
plot = markup.page()
p = "%s/efficiency/%s_vs_fap_%s.png" % (trial, row, b)
plot.a(href=p, title="FAP versus %s" % pTag)
plot.img(src=p)
plot.a.close()
d.append(plot())
td.append(d)
row = 'snruncut'
d = ['SNR after cuts <br> have been applied']
for bin in bins:
b = '%s_%s' % tuple(bin)
plot = markup.page()
p = "%s/efficiency/%s_vs_fap_%s.png" % (trial, row, b)
plot.a(href=p, title="FAP versus %s" % pTag)
plot.img(src=p)
plot.a.close()
d.append(plot())
td.append(d)
page = write_table(page, th, td)
page.add('For more details on the loudest offsource events see')
page.a(href='%s/efficiency/loudest_offsource_trigs.html' % (trial))
page.add('here.')
page.a.close()
return page | Write injection chisq plots to markup.page object page | Below is the the instruction that describes the task:
### Input:
Write injection chisq plots to markup.page object page
### Response:
def write_loudest_events(page, bins, onsource=False):
"""
Write injection chisq plots to markup.page object page
"""
th = ['']+['Mchirp %s - %s' % tuple(bin) for bin in bins]
td = []
plots = ['BestNR','SNR']
if onsource:
trial = 'ONSOURCE'
else:
trial = 'OFFTRIAL_1'
for pTag in plots:
row = pTag.lower()
d = [pTag]
for bin in bins:
b = '%s_%s' % tuple(bin)
plot = markup.page()
p = "%s/efficiency/%s_vs_fap_%s.png" % (trial, row, b)
plot.a(href=p, title="FAP versus %s" % pTag)
plot.img(src=p)
plot.a.close()
d.append(plot())
td.append(d)
row = 'snruncut'
d = ['SNR after cuts <br> have been applied']
for bin in bins:
b = '%s_%s' % tuple(bin)
plot = markup.page()
p = "%s/efficiency/%s_vs_fap_%s.png" % (trial, row, b)
plot.a(href=p, title="FAP versus %s" % pTag)
plot.img(src=p)
plot.a.close()
d.append(plot())
td.append(d)
page = write_table(page, th, td)
page.add('For more details on the loudest offsource events see')
page.a(href='%s/efficiency/loudest_offsource_trigs.html' % (trial))
page.add('here.')
page.a.close()
return page |
def _to_sky_params(self, wcs, mode='all'):
"""
Convert the pixel aperture parameters to those for a sky
aperture.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
sky_params : dict
A dictionary of parameters for an equivalent sky aperture.
"""
sky_params = {}
x, y = np.transpose(self.positions)
sky_params['positions'] = pixel_to_skycoord(x, y, wcs, mode=mode)
# The aperture object must have a single value for each shape
# parameter so we must use a single pixel scale for all positions.
# Here, we define the scale at the WCS CRVAL position.
crval = SkyCoord([wcs.wcs.crval], frame=wcs_to_celestial_frame(wcs),
unit=wcs.wcs.cunit)
scale, angle = pixel_scale_angle_at_skycoord(crval, wcs)
params = self._params[:]
theta_key = 'theta'
if theta_key in self._params:
sky_params[theta_key] = (self.theta * u.rad) - angle.to(u.rad)
params.remove(theta_key)
param_vals = [getattr(self, param) for param in params]
for param, param_val in zip(params, param_vals):
sky_params[param] = (param_val * u.pix * scale).to(u.arcsec)
return sky_params | Convert the pixel aperture parameters to those for a sky
aperture.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
sky_params : dict
A dictionary of parameters for an equivalent sky aperture. | Below is the the instruction that describes the task:
### Input:
Convert the pixel aperture parameters to those for a sky
aperture.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
sky_params : dict
A dictionary of parameters for an equivalent sky aperture.
### Response:
def _to_sky_params(self, wcs, mode='all'):
"""
Convert the pixel aperture parameters to those for a sky
aperture.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
sky_params : dict
A dictionary of parameters for an equivalent sky aperture.
"""
sky_params = {}
x, y = np.transpose(self.positions)
sky_params['positions'] = pixel_to_skycoord(x, y, wcs, mode=mode)
# The aperture object must have a single value for each shape
# parameter so we must use a single pixel scale for all positions.
# Here, we define the scale at the WCS CRVAL position.
crval = SkyCoord([wcs.wcs.crval], frame=wcs_to_celestial_frame(wcs),
unit=wcs.wcs.cunit)
scale, angle = pixel_scale_angle_at_skycoord(crval, wcs)
params = self._params[:]
theta_key = 'theta'
if theta_key in self._params:
sky_params[theta_key] = (self.theta * u.rad) - angle.to(u.rad)
params.remove(theta_key)
param_vals = [getattr(self, param) for param in params]
for param, param_val in zip(params, param_vals):
sky_params[param] = (param_val * u.pix * scale).to(u.arcsec)
return sky_params |
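A hedged usage sketch of the pixel-to-sky conversion that _to_sky_params relies on, using a hand-built TAN projection; every number below is fabricated and the aperture classes themselves are not reproduced.

import numpy as np
from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_skycoord

wcs = WCS(naxis=2)                                   # toy WCS, values made up
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crval = [150.0, 2.0]                         # reference sky position (deg)
wcs.wcs.crpix = [50.0, 50.0]                         # reference pixel
wcs.wcs.cdelt = [-1.0 / 3600, 1.0 / 3600]            # ~1 arcsec per pixel
wcs.wcs.cunit = ['deg', 'deg']

positions = np.array([[10.0, 20.0], [30.5, 40.25]])  # pixel (x, y) pairs
x, y = np.transpose(positions)
print(pixel_to_skycoord(x, y, wcs, mode='wcs'))      # core WCS only, i.e. the 'wcs' mode described above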
def get_baking_statuses_sql(get_request):
""" Creates SQL to get info on baking books filtered from GET request.
All books that have ever attempted to bake will be returned if they
pass the filters in the GET request.
If a single book has been requested to bake multiple times there will
be a row for each of the baking attempts.
By default the results are sorted in descending order of when they were
requested to bake.
N.B. The version reported for a print-style linked recipe will be the
lowest cnx-recipes release installed that contains the exact recipe
used to bake that book, regardless of when the book was baked relative
to recipe releases. E.g. if a book uses the 'physics' recipe, and it is
identical for versions 1.1, 1.2, 1.3, and 1.4, then it will be reported
as version 1.1, even if the most recent release is tagged 1.4.
"""
args = {}
sort = get_request.get('sort', 'bpsa.created DESC')
if (len(sort.split(" ")) != 2 or
sort.split(" ")[0] not in SORTS_DICT.keys() or
sort.split(" ")[1] not in ARROW_MATCH.keys()):
raise httpexceptions.HTTPBadRequest(
'invalid sort: {}'.format(sort))
if sort == "STATE ASC" or sort == "STATE DESC":
sort = 'bpsa.created DESC'
uuid_filter = get_request.get('uuid', '').strip()
author_filter = get_request.get('author', '').strip()
latest_filter = get_request.get('latest', False)
sql_filters = "WHERE"
if latest_filter:
sql_filters += """ ARRAY [m.major_version, m.minor_version] = (
SELECT max(ARRAY[major_version,minor_version]) FROM
modules where m.uuid= uuid) AND """
if uuid_filter != '':
args['uuid'] = uuid_filter
sql_filters += " m.uuid=%(uuid)s AND "
if author_filter != '':
author_filter = author_filter.decode('utf-8')
sql_filters += " %(author)s=ANY(m.authors) "
args["author"] = author_filter
if sql_filters.endswith("AND "):
sql_filters = sql_filters[:-4]
if sql_filters == "WHERE":
sql_filters = ""
# FIXME celery AsyncResult API is soooo sloow that this page takes
# 2 min. or more to load on production. As a workaround, this code
# accesses the celery_taskmeta table directly. Need to remove that access
# once we track enough state info ourselves. Want to track when queued,
# started, ended, etc. for future monitoring of baking system performance
# as well.
# The 'limit 1' subselect is to ensure the "oldest identical version"
# for recipes released as part of cnx-recipes (avoids one line per
# identical recipe file in different releases, for a single baking job)
statement = """
SELECT m.name, m.authors, m.uuid,
module_version(m.major_version,m.minor_version)
as current_version,
m.print_style,
CASE WHEN f.sha1 IS NOT NULL
THEN coalesce(dps.print_style,'(custom)')
ELSE dps.print_style
END AS recipe_name,
(select tag from print_style_recipes
where print_style = m.print_style
and fileid = m.recipe
order by revised asc limit 1) as recipe_tag,
coalesce(dps.fileid, m.recipe) as latest_recipe_id,
m.recipe as recipe_id,
f.sha1 as recipe,
m.module_ident,
ident_hash(m.uuid, m.major_version, m.minor_version),
bpsa.created, ctm.traceback,
CASE WHEN ctm.status = 'SUCCESS'
AND ms.statename = 'fallback'
THEN 'FALLBACK'
ELSE ctm.status
END as state
FROM document_baking_result_associations AS bpsa
INNER JOIN modules AS m USING (module_ident)
INNER JOIN modulestates as ms USING (stateid)
LEFT JOIN celery_taskmeta AS ctm
ON bpsa.result_id = ctm.task_id::uuid
LEFT JOIN default_print_style_recipes as dps
ON dps.print_style = m.print_style
LEFT JOIN latest_modules as lm
ON lm.uuid=m.uuid
LEFT JOIN files f on m.recipe = f.fileid
{}
ORDER BY {};
""".format(sql_filters, sort)
args.update({'sort': sort})
return statement, args | Creates SQL to get info on baking books filtered from GET request.
All books that have ever attempted to bake will be returned if they
pass the filters in the GET request.
If a single book has been requested to bake multiple times there will
be a row for each of the baking attempts.
By default the results are sorted in descending order of when they were
requested to bake.
N.B. The version reported for a print-style linked recipe will be the
lowest cnx-recipes release installed that contains the exact recipe
used to bake that book, regardless of when the book was baked relative
to recipe releases. E.g. if a book uses the 'physics' recipe, and it is
identical for versions 1.1, 1.2, 1.3, and 1.4, then it will be reported
as version 1.1, even if the most recent release is tagged 1.4. | Below is the the instruction that describes the task:
### Input:
Creates SQL to get info on baking books filtered from GET request.
All books that have ever attempted to bake will be returned if they
pass the filters in the GET request.
If a single book has been requested to bake multiple times there will
be a row for each of the baking attempts.
By default the results are sorted in descending order of when they were
requested to bake.
N.B. The version reported for a print-style linked recipe will be the
lowest cnx-recipes release installed that contains the exact recipe
used to bake that book, regardless of when the book was baked relative
to recipe releases. E.g. if a book uses the 'physics' recipe, and it is
identical for versions 1.1, 1.2, 1.3, and 1.4, then it will be reported
as version 1.1, even if the most recent release is tagged 1.4.
### Response:
def get_baking_statuses_sql(get_request):
""" Creates SQL to get info on baking books filtered from GET request.
All books that have ever attempted to bake will be returned if they
pass the filters in the GET request.
If a single book has been requested to bake multiple times there will
be a row for each of the baking attempts.
By default the results are sorted in descending order of when they were
requested to bake.
N.B. The version reported for a print-style linked recipe will be the
lowest cnx-recipes release installed that contains the exact recipe
used to bake that book, regardless of when the book was baked relative
to recipe releases. E.g. if a book uses the 'physics' recipe, and it is
identical for versions 1.1, 1.2, 1.3, and 1.4, then it will be reported
as version 1.1, even if the most recent release is tagged 1.4.
"""
args = {}
sort = get_request.get('sort', 'bpsa.created DESC')
if (len(sort.split(" ")) != 2 or
sort.split(" ")[0] not in SORTS_DICT.keys() or
sort.split(" ")[1] not in ARROW_MATCH.keys()):
raise httpexceptions.HTTPBadRequest(
'invalid sort: {}'.format(sort))
if sort == "STATE ASC" or sort == "STATE DESC":
sort = 'bpsa.created DESC'
uuid_filter = get_request.get('uuid', '').strip()
author_filter = get_request.get('author', '').strip()
latest_filter = get_request.get('latest', False)
sql_filters = "WHERE"
if latest_filter:
sql_filters += """ ARRAY [m.major_version, m.minor_version] = (
SELECT max(ARRAY[major_version,minor_version]) FROM
modules where m.uuid= uuid) AND """
if uuid_filter != '':
args['uuid'] = uuid_filter
sql_filters += " m.uuid=%(uuid)s AND "
if author_filter != '':
author_filter = author_filter.decode('utf-8')
sql_filters += " %(author)s=ANY(m.authors) "
args["author"] = author_filter
if sql_filters.endswith("AND "):
sql_filters = sql_filters[:-4]
if sql_filters == "WHERE":
sql_filters = ""
# FIXME celery AsyncResult API is soooo sloow that this page takes
# 2 min. or more to load on production. As a workaround, this code
# accesses the celery_taskmeta table directly. Need to remove that access
# once we track enough state info ourselves. Want to track when queued,
# started, ended, etc. for future monitoring of baking system performance
# as well.
# The 'limit 1' subselect is to ensure the "oldest identical version"
# for recipes released as part of cnx-recipes (avoids one line per
# identical recipe file in different releases, for a single baking job)
statement = """
SELECT m.name, m.authors, m.uuid,
module_version(m.major_version,m.minor_version)
as current_version,
m.print_style,
CASE WHEN f.sha1 IS NOT NULL
THEN coalesce(dps.print_style,'(custom)')
ELSE dps.print_style
END AS recipe_name,
(select tag from print_style_recipes
where print_style = m.print_style
and fileid = m.recipe
order by revised asc limit 1) as recipe_tag,
coalesce(dps.fileid, m.recipe) as latest_recipe_id,
m.recipe as recipe_id,
f.sha1 as recipe,
m.module_ident,
ident_hash(m.uuid, m.major_version, m.minor_version),
bpsa.created, ctm.traceback,
CASE WHEN ctm.status = 'SUCCESS'
AND ms.statename = 'fallback'
THEN 'FALLBACK'
ELSE ctm.status
END as state
FROM document_baking_result_associations AS bpsa
INNER JOIN modules AS m USING (module_ident)
INNER JOIN modulestates as ms USING (stateid)
LEFT JOIN celery_taskmeta AS ctm
ON bpsa.result_id = ctm.task_id::uuid
LEFT JOIN default_print_style_recipes as dps
ON dps.print_style = m.print_style
LEFT JOIN latest_modules as lm
ON lm.uuid=m.uuid
LEFT JOIN files f on m.recipe = f.fileid
{}
ORDER BY {};
""".format(sql_filters, sort)
args.update({'sort': sort})
return statement, args |
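A hypothetical usage sketch for the SQL builder above. The module name baking_views, the DSN and the uuid value are placeholders, and SORTS_DICT/ARROW_MATCH are assumed to come along with the import; the GET parameters only need to be a dict-like object with a .get() method.

from baking_views import get_baking_statuses_sql   # hypothetical module name
import psycopg2

get_params = {'sort': 'bpsa.created DESC',
              'uuid': 'a1b2c3d4-0000-0000-0000-000000000000'}   # placeholder uuid; a plain dict provides .get()
statement, args = get_baking_statuses_sql(get_params)

with psycopg2.connect('dbname=example') as conn:                # placeholder DSN
    with conn.cursor() as cur:
        cur.execute(statement, args)    # args fills %(uuid)s / %(author)s when those filters are set
        for row in cur.fetchall():
            print(row[0], row[-1])      # book name and baking state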
def get_uuids():
"""List all bundle UUIDs in the worksheet."""
result = shell('cl ls -w {} -u'.format(worksheet))
uuids = result.split('\n')
uuids = uuids[1:-1] # trim non uuids
return uuids | List all bundle UUIDs in the worksheet. | Below is the the instruction that describes the task:
### Input:
List all bundle UUIDs in the worksheet.
### Response:
def get_uuids():
"""List all bundle UUIDs in the worksheet."""
result = shell('cl ls -w {} -u'.format(worksheet))
uuids = result.split('\n')
uuids = uuids[1:-1] # trim non uuids
return uuids |
def cf_number_from_integer(integer):
"""
Creates a CFNumber object from an integer
:param integer:
The integer to create the CFNumber for
:return:
A CFNumber
"""
integer_as_long = c_long(integer)
return CoreFoundation.CFNumberCreate(
CoreFoundation.kCFAllocatorDefault,
kCFNumberCFIndexType,
byref(integer_as_long)
) | Creates a CFNumber object from an integer
:param integer:
The integer to create the CFNumber for
:return:
A CFNumber | Below is the the instruction that describes the task:
### Input:
Creates a CFNumber object from an integer
:param integer:
The integer to create the CFNumber for
:return:
A CFNumber
### Response:
def cf_number_from_integer(integer):
"""
Creates a CFNumber object from an integer
:param integer:
The integer to create the CFNumber for
:return:
A CFNumber
"""
integer_as_long = c_long(integer)
return CoreFoundation.CFNumberCreate(
CoreFoundation.kCFAllocatorDefault,
kCFNumberCFIndexType,
byref(integer_as_long)
) |
def schemata(self) -> list:
"""
Return list with schemata in cache.
:return: list of schemata
"""
LOGGER.debug('SchemaCache.schemata >>>')
LOGGER.debug('SchemaCache.schemata <<<')
return [self._schema_key2schema[seq_no] for seq_no in self._schema_key2schema] | Return list with schemata in cache.
:return: list of schemata | Below is the the instruction that describes the task:
### Input:
Return list with schemata in cache.
:return: list of schemata
### Response:
def schemata(self) -> list:
"""
Return list with schemata in cache.
:return: list of schemata
"""
LOGGER.debug('SchemaCache.schemata >>>')
LOGGER.debug('SchemaCache.schemata <<<')
return [self._schema_key2schema[seq_no] for seq_no in self._schema_key2schema] |
def assess_angmom(X):
"""
Checks for change of sign in each component of the angular momentum.
Returns an array with ith entry 1 if no sign change in i component
and 0 if sign change.
Box = (0,0,0)
S.A loop = (0,0,1)
L.A loop = (1,0,0)
"""
L=angmom(X[0])
loop = np.array([1,1,1])
for i in X[1:]:
L0 = angmom(i)
if(L0[0]*L[0]<0.):
loop[0] = 0
if(L0[1]*L[1]<0.):
loop[1] = 0
if(L0[2]*L[2]<0.):
loop[2] = 0
return loop | Checks for change of sign in each component of the angular momentum.
Returns an array with ith entry 1 if no sign change in i component
and 0 if sign change.
Box = (0,0,0)
S.A loop = (0,0,1)
L.A loop = (1,0,0) | Below is the the instruction that describes the task:
### Input:
Checks for change of sign in each component of the angular momentum.
Returns an array with ith entry 1 if no sign change in i component
and 0 if sign change.
Box = (0,0,0)
S.A loop = (0,0,1)
L.A loop = (1,0,0)
### Response:
def assess_angmom(X):
"""
Checks for change of sign in each component of the angular momentum.
Returns an array with ith entry 1 if no sign change in i component
and 0 if sign change.
Box = (0,0,0)
S.A loop = (0,0,1)
L.A loop = (1,0,0)
"""
L=angmom(X[0])
loop = np.array([1,1,1])
for i in X[1:]:
L0 = angmom(i)
if(L0[0]*L[0]<0.):
loop[0] = 0
if(L0[1]*L[1]<0.):
loop[1] = 0
if(L0[2]*L[2]<0.):
loop[2] = 0
return loop |
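A toy check of the classification above, assuming assess_angmom is in scope. The angmom helper is not shown in the entry, so it is assumed here to be the usual L = r x v with phase-space points ordered (x, y, z, vx, vy, vz); the orbit data are fabricated.

import numpy as np

def angmom(x):
    # Assumed convention: x = (x, y, z, vx, vy, vz); L = r cross v.
    return np.cross(x[:3], x[3:])

# Fabricated near-planar orbit: Lz never changes sign, Lx and Ly do.
t = np.linspace(0.05, 2.0 * np.pi, 200)
X = np.column_stack([np.cos(t), np.sin(t), 0.05 * np.sin(2 * t + 0.3),
                     -np.sin(t), np.cos(t), 0.10 * np.cos(2 * t + 0.3)])
print(assess_angmom(X))   # -> [0 0 1], the "S.A loop" pattern from the docstring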
def scale_ps(lat):
"""
This function calculates the scaling factor for a polar stereographic
projection (ie. SSM/I grid) to correct area calculations. The scaling
factor is defined (from Snyder, 1982, Map Projections used by the U.S.
Geological Survey) as:
k = (mc/m)*(t/tc), where:
m = cos(lat)/sqrt(1 - e2*sin(lat)^2)
t = tan(Pi/4 - lat/2)/((1 - e*sin(lat))/(1 + e*sin(lat)))^(e/2)
e2 = 0.006693883 is the earth eccentricity (Hughes ellipsoid)
e = sqrt(e2)
mc = m at the reference latitude (70 degrees)
tc = t at the reference latitude (70 degrees)
The ratio mc/tc is precalculated and stored in the variable m70_t70.
From Ben Smith PS scale m file (7/12/12)
"""
lat = np.array(lat)
if np.any(lat > 0):
m70_t70 = 1.9332279
#Hack to deal with pole
lat[lat>=90.0] = 89.999999999
else:
# for 71 deg, southern PS -- checked BS 5/2012
m70_t70 = 1.93903005
lat[lat<=-90.0] = -89.999999999
#for WGS84, a=6378137, 1/f = 298.257223563 -> 1-sqrt(1-e^2) = f
#-> 1-(1-f)^2 = e2 = 0.006694379990141
#e2 = 0.006693883
e2 = 0.006694379990141 # BS calculated from WGS84 parameters 5/2012
e = np.sqrt(e2)
lat = np.abs(np.deg2rad(lat))
slat = np.sin(lat)
clat = np.cos(lat)
m = clat/np.sqrt(1. - e2*slat**2)
t = np.tan(np.pi/4 - lat/2)/((1. - e*slat)/(1. + e*slat))**(e/2)
k = m70_t70*t/m
scale=(1./k)
return scale | This function calculates the scaling factor for a polar stereographic
projection (ie. SSM/I grid) to correct area calculations. The scaling
factor is defined (from Snyder, 1982, Map Projections used by the U.S.
Geological Survey) as:
k = (mc/m)*(t/tc), where:
m = cos(lat)/sqrt(1 - e2*sin(lat)^2)
t = tan(Pi/4 - lat/2)/((1 - e*sin(lat))/(1 + e*sin(lat)))^(e/2)
e2 = 0.006693883 is the earth eccentricity (Hughes ellipsoid)
e = sqrt(e2)
mc = m at the reference latitude (70 degrees)
tc = t at the reference latitude (70 degrees)
The ratio mc/tc is precalculated and stored in the variable m70_t70.
From Ben Smith PS scale m file (7/12/12) | Below is the the instruction that describes the task:
### Input:
This function calculates the scaling factor for a polar stereographic
projection (ie. SSM/I grid) to correct area calculations. The scaling
factor is defined (from Snyder, 1982, Map Projections used by the U.S.
Geological Survey) as:
k = (mc/m)*(t/tc), where:
m = cos(lat)/sqrt(1 - e2*sin(lat)^2)
t = tan(Pi/4 - lat/2)/((1 - e*sin(lat))/(1 + e*sin(lat)))^(e/2)
e2 = 0.006693883 is the earth eccentricity (Hughes ellipsoid)
e = sqrt(e2)
mc = m at the reference latitude (70 degrees)
tc = t at the reference latitude (70 degrees)
The ratio mc/tc is precalculated and stored in the variable m70_t70.
From Ben Smith PS scale m file (7/12/12)
### Response:
def scale_ps(lat):
"""
This function calculates the scaling factor for a polar stereographic
projection (ie. SSM/I grid) to correct area calculations. The scaling
factor is defined (from Snyder, 1982, Map Projections used by the U.S.
Geological Survey) as:
k = (mc/m)*(t/tc), where:
m = cos(lat)/sqrt(1 - e2*sin(lat)^2)
t = tan(Pi/4 - lat/2)/((1 - e*sin(lat))/(1 + e*sin(lat)))^(e/2)
e2 = 0.006693883 is the earth eccentricity (Hughes ellipsoid)
e = sqrt(e2)
mc = m at the reference latitude (70 degrees)
tc = t at the reference latitude (70 degrees)
The ratio mc/tc is precalculated and stored in the variable m70_t70.
From Ben Smith PS scale m file (7/12/12)
"""
lat = np.array(lat)
if np.any(lat > 0):
m70_t70 = 1.9332279
#Hack to deal with pole
lat[lat>=90.0] = 89.999999999
else:
# for 71 deg, southern PS -- checked BS 5/2012
m70_t70 = 1.93903005
lat[lat<=-90.0] = -89.999999999
#for WGS84, a=6378137, 1/f = 298.257223563 -> 1-sqrt(1-e^2) = f
#-> 1-(1-f)^2 = e2 = 0.006694379990141
#e2 = 0.006693883
e2 = 0.006694379990141 # BS calculated from WGS84 parameters 5/2012
e = np.sqrt(e2)
lat = np.abs(np.deg2rad(lat))
slat = np.sin(lat)
clat = np.cos(lat)
m = clat/np.sqrt(1. - e2*slat**2)
t = np.tan(np.pi/4 - lat/2)/((1. - e*slat)/(1. + e*slat))**(e/2)
k = m70_t70*t/m
scale=(1./k)
return scale |
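A quick numerical check of the formula above, assuming scale_ps itself is in scope (or importable). By construction the factor is exactly 1 at the 70-degree reference latitude; the other latitudes are just illustrative.

import numpy as np

lats = np.array([60.0, 70.0, 80.0, 90.0])
factors = scale_ps(lats)             # returns 1/k for each latitude
for lat, f in zip(lats, factors):
    print("lat %5.1f  scale factor %.6f" % (lat, f))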
def _extract_emd(mat, filename):
"""Extract the data from the EMD substruct, given a medusa-created MNU0-mat
file
Parameters
----------
mat: matlab-imported struct
"""
emd = mat['EMD'].squeeze()
# Labview epoch
epoch = datetime.datetime(1904, 1, 1)
def convert_epoch(x):
timestamp = epoch + datetime.timedelta(seconds=x.astype(float))
return timestamp
dfl = []
# loop over frequencies
for f_id in range(0, emd.size):
# print('Frequency: ', emd[f_id]['fm'])
fdata = emd[f_id]
# some consistency checks
if len(fdata['nu']) == 2 and fdata['nu'].shape[1] == 2:
raise Exception(
'Need MNU0 file, not a quadpole .mat file: {0}'.format(
filename
)
)
# fdata_md = md[f_id]
timestamp = np.atleast_2d(
[convert_epoch(x) for x in fdata['Time'].squeeze()]
).T
df = pd.DataFrame(
np.hstack((
timestamp,
fdata['ni'],
fdata['nu'][:, np.newaxis],
fdata['Zt3'],
fdata['Is3'],
fdata['Il3'],
fdata['Zg3'],
fdata['As3'][:, 0, :].squeeze(),
fdata['As3'][:, 1, :].squeeze(),
fdata['As3'][:, 2, :].squeeze(),
fdata['As3'][:, 3, :].squeeze(),
fdata['Yg13'],
fdata['Yg23'],
)),
)
df.columns = (
'datetime',
'a',
'b',
'p',
'Z1',
'Z2',
'Z3',
'Is1',
'Is2',
'Is3',
'Il1',
'Il2',
'Il3',
'Zg1',
'Zg2',
'Zg3',
'ShuntVoltage1_1',
'ShuntVoltage1_2',
'ShuntVoltage1_3',
'ShuntVoltage2_1',
'ShuntVoltage2_2',
'ShuntVoltage2_3',
'ShuntVoltage3_1',
'ShuntVoltage3_2',
'ShuntVoltage3_3',
'ShuntVoltage4_1',
'ShuntVoltage4_2',
'ShuntVoltage4_3',
'Yg13_1',
'Yg13_2',
'Yg13_3',
'Yg23_1',
'Yg23_2',
'Yg23_3',
)
df['frequency'] = np.ones(df.shape[0]) * fdata['fm']
# cast to correct type
df['datetime'] = pd.to_datetime(df['datetime'])
df['a'] = df['a'].astype(int)
df['b'] = df['b'].astype(int)
df['p'] = df['p'].astype(int)
df['Z1'] = df['Z1'].astype(complex)
df['Z2'] = df['Z2'].astype(complex)
df['Z3'] = df['Z3'].astype(complex)
df['Zg1'] = df['Zg1'].astype(complex)
df['Zg2'] = df['Zg2'].astype(complex)
df['Zg3'] = df['Zg3'].astype(complex)
df['Is1'] = df['Is1'].astype(complex)
df['Is2'] = df['Is2'].astype(complex)
df['Is3'] = df['Is3'].astype(complex)
df['Il1'] = df['Il1'].astype(complex)
df['Il2'] = df['Il2'].astype(complex)
df['Il3'] = df['Il3'].astype(complex)
df['ShuntVoltage1_1'] = df['ShuntVoltage1_1'].astype(complex)
df['ShuntVoltage1_2'] = df['ShuntVoltage1_2'].astype(complex)
df['ShuntVoltage1_3'] = df['ShuntVoltage1_3'].astype(complex)
df['ShuntVoltage2_1'] = df['ShuntVoltage2_1'].astype(complex)
df['ShuntVoltage2_2'] = df['ShuntVoltage2_2'].astype(complex)
df['ShuntVoltage2_3'] = df['ShuntVoltage2_3'].astype(complex)
df['ShuntVoltage3_1'] = df['ShuntVoltage3_1'].astype(complex)
df['ShuntVoltage3_2'] = df['ShuntVoltage3_2'].astype(complex)
df['ShuntVoltage3_3'] = df['ShuntVoltage3_3'].astype(complex)
df['ShuntVoltage4_1'] = df['ShuntVoltage4_1'].astype(complex)
df['ShuntVoltage4_2'] = df['ShuntVoltage4_2'].astype(complex)
df['ShuntVoltage4_3'] = df['ShuntVoltage4_3'].astype(complex)
dfl.append(df)
if len(dfl) == 0:
return None
df = pd.concat(dfl)
# average swapped current injections here!
# TODO
# sort current injections
condition = df['a'] > df['b']
df.loc[condition, ['a', 'b']] = df.loc[condition, ['b', 'a']].values
# change sign because we changed A and B
df.loc[condition, ['Z1', 'Z2', 'Z3']] *= -1
# average of Z1-Z3
df['Zt'] = np.mean(df[['Z1', 'Z2', 'Z3']].values, axis=1)
# we need to keep the sign of the real part
sign_re = df['Zt'].real / np.abs(df['Zt'].real)
df['r'] = np.abs(df['Zt']) * sign_re
# df['Zt_std'] = np.std(df[['Z1', 'Z2', 'Z3']].values, axis=1)
df['Is'] = np.mean(df[['Is1', 'Is2', 'Is3']].values, axis=1)
df['Il'] = np.mean(df[['Il1', 'Il2', 'Il3']].values, axis=1)
df['Zg'] = np.mean(df[['Zg1', 'Zg2', 'Zg3']].values, axis=1)
# "standard" injected current, in [mA]
df['Iab'] = np.abs(df['Is']) * 1e3
df['Iab'] = df['Iab'].astype(float)
# df['Is_std'] = np.std(df[['Is1', 'Is2', 'Is3']].values, axis=1)
return df | Extract the data from the EMD substruct, given a medusa-created MNU0-mat
file
Parameters
----------
mat: matlab-imported struct | Below is the the instruction that describes the task:
### Input:
Extract the data from the EMD substruct, given a medusa-created MNU0-mat
file
Parameters
----------
mat: matlab-imported struct
### Response:
def _extract_emd(mat, filename):
"""Extract the data from the EMD substruct, given a medusa-created MNU0-mat
file
Parameters
----------
mat: matlab-imported struct
"""
emd = mat['EMD'].squeeze()
# Labview epoch
epoch = datetime.datetime(1904, 1, 1)
def convert_epoch(x):
timestamp = epoch + datetime.timedelta(seconds=x.astype(float))
return timestamp
dfl = []
# loop over frequencies
for f_id in range(0, emd.size):
# print('Frequency: ', emd[f_id]['fm'])
fdata = emd[f_id]
# some consistency checks
if len(fdata['nu']) == 2 and fdata['nu'].shape[1] == 2:
raise Exception(
'Need MNU0 file, not a quadpole .mat file: {0}'.format(
filename
)
)
# fdata_md = md[f_id]
timestamp = np.atleast_2d(
[convert_epoch(x) for x in fdata['Time'].squeeze()]
).T
df = pd.DataFrame(
np.hstack((
timestamp,
fdata['ni'],
fdata['nu'][:, np.newaxis],
fdata['Zt3'],
fdata['Is3'],
fdata['Il3'],
fdata['Zg3'],
fdata['As3'][:, 0, :].squeeze(),
fdata['As3'][:, 1, :].squeeze(),
fdata['As3'][:, 2, :].squeeze(),
fdata['As3'][:, 3, :].squeeze(),
fdata['Yg13'],
fdata['Yg23'],
)),
)
df.columns = (
'datetime',
'a',
'b',
'p',
'Z1',
'Z2',
'Z3',
'Is1',
'Is2',
'Is3',
'Il1',
'Il2',
'Il3',
'Zg1',
'Zg2',
'Zg3',
'ShuntVoltage1_1',
'ShuntVoltage1_2',
'ShuntVoltage1_3',
'ShuntVoltage2_1',
'ShuntVoltage2_2',
'ShuntVoltage2_3',
'ShuntVoltage3_1',
'ShuntVoltage3_2',
'ShuntVoltage3_3',
'ShuntVoltage4_1',
'ShuntVoltage4_2',
'ShuntVoltage4_3',
'Yg13_1',
'Yg13_2',
'Yg13_3',
'Yg23_1',
'Yg23_2',
'Yg23_3',
)
df['frequency'] = np.ones(df.shape[0]) * fdata['fm']
# cast to correct type
df['datetime'] = pd.to_datetime(df['datetime'])
df['a'] = df['a'].astype(int)
df['b'] = df['b'].astype(int)
df['p'] = df['p'].astype(int)
df['Z1'] = df['Z1'].astype(complex)
df['Z2'] = df['Z2'].astype(complex)
df['Z3'] = df['Z3'].astype(complex)
df['Zg1'] = df['Zg1'].astype(complex)
df['Zg2'] = df['Zg2'].astype(complex)
df['Zg3'] = df['Zg3'].astype(complex)
df['Is1'] = df['Is1'].astype(complex)
df['Is2'] = df['Is2'].astype(complex)
df['Is3'] = df['Is3'].astype(complex)
df['Il1'] = df['Il1'].astype(complex)
df['Il2'] = df['Il2'].astype(complex)
df['Il3'] = df['Il3'].astype(complex)
df['ShuntVoltage1_1'] = df['ShuntVoltage1_1'].astype(complex)
df['ShuntVoltage1_2'] = df['ShuntVoltage1_2'].astype(complex)
df['ShuntVoltage1_3'] = df['ShuntVoltage1_3'].astype(complex)
df['ShuntVoltage2_1'] = df['ShuntVoltage2_1'].astype(complex)
df['ShuntVoltage2_2'] = df['ShuntVoltage2_2'].astype(complex)
df['ShuntVoltage2_3'] = df['ShuntVoltage2_3'].astype(complex)
df['ShuntVoltage3_1'] = df['ShuntVoltage3_1'].astype(complex)
df['ShuntVoltage3_2'] = df['ShuntVoltage3_2'].astype(complex)
df['ShuntVoltage3_3'] = df['ShuntVoltage3_3'].astype(complex)
df['ShuntVoltage4_1'] = df['ShuntVoltage4_1'].astype(complex)
df['ShuntVoltage4_2'] = df['ShuntVoltage4_2'].astype(complex)
df['ShuntVoltage4_3'] = df['ShuntVoltage4_3'].astype(complex)
dfl.append(df)
if len(dfl) == 0:
return None
df = pd.concat(dfl)
# average swapped current injections here!
# TODO
# sort current injections
condition = df['a'] > df['b']
df.loc[condition, ['a', 'b']] = df.loc[condition, ['b', 'a']].values
# change sign because we changed A and B
df.loc[condition, ['Z1', 'Z2', 'Z3']] *= -1
# average of Z1-Z3
df['Zt'] = np.mean(df[['Z1', 'Z2', 'Z3']].values, axis=1)
# we need to keep the sign of the real part
sign_re = df['Zt'].real / np.abs(df['Zt'].real)
df['r'] = np.abs(df['Zt']) * sign_re
# df['Zt_std'] = np.std(df[['Z1', 'Z2', 'Z3']].values, axis=1)
df['Is'] = np.mean(df[['Is1', 'Is2', 'Is3']].values, axis=1)
df['Il'] = np.mean(df[['Il1', 'Il2', 'Il3']].values, axis=1)
df['Zg'] = np.mean(df[['Zg1', 'Zg2', 'Zg3']].values, axis=1)
# "standard" injected current, in [mA]
df['Iab'] = np.abs(df['Is']) * 1e3
df['Iab'] = df['Iab'].astype(float)
# df['Is_std'] = np.std(df[['Is1', 'Is2', 'Is3']].values, axis=1)
return df |
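A hypothetical usage sketch for the extractor above. The file name is made up, the function is assumed to be importable, and the exact scipy.io.loadmat options the original importer uses are not shown in this entry, so the load call may need adjusting (e.g. squeeze_me / struct_as_record flags).

import scipy.io as sio

mat = sio.loadmat('measurement_mnu0.mat')            # placeholder path to a medusa MNU0 .mat file
df = _extract_emd(mat, filename='measurement_mnu0.mat')
if df is not None:
    print(df[['a', 'b', 'p', 'frequency', 'r', 'Iab']].head())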
def get_correlated_report_ids(self, indicators):
"""
DEPRECATED!
Retrieves a list of the IDs of all TruSTAR reports that contain the searched indicators.
:param indicators: A list of indicator values to retrieve correlated reports for.
:return: The list of IDs of reports that correlated.
Example:
>>> report_ids = ts.get_correlated_report_ids(["wannacry", "www.evil.com"])
>>> print(report_ids)
["e3bc6921-e2c8-42eb-829e-eea8da2d3f36", "4d04804f-ff82-4a0b-8586-c42aef2f6f73"]
"""
params = {'indicators': indicators}
resp = self._client.get("reports/correlate", params=params)
return resp.json() | DEPRECATED!
Retrieves a list of the IDs of all TruSTAR reports that contain the searched indicators.
:param indicators: A list of indicator values to retrieve correlated reports for.
:return: The list of IDs of reports that correlated.
Example:
>>> report_ids = ts.get_correlated_report_ids(["wannacry", "www.evil.com"])
>>> print(report_ids)
["e3bc6921-e2c8-42eb-829e-eea8da2d3f36", "4d04804f-ff82-4a0b-8586-c42aef2f6f73"] | Below is the the instruction that describes the task:
### Input:
DEPRECATED!
Retrieves a list of the IDs of all TruSTAR reports that contain the searched indicators.
:param indicators: A list of indicator values to retrieve correlated reports for.
:return: The list of IDs of reports that correlated.
Example:
>>> report_ids = ts.get_correlated_report_ids(["wannacry", "www.evil.com"])
>>> print(report_ids)
["e3bc6921-e2c8-42eb-829e-eea8da2d3f36", "4d04804f-ff82-4a0b-8586-c42aef2f6f73"]
### Response:
def get_correlated_report_ids(self, indicators):
"""
DEPRECATED!
Retrieves a list of the IDs of all TruSTAR reports that contain the searched indicators.
:param indicators: A list of indicator values to retrieve correlated reports for.
:return: The list of IDs of reports that correlated.
Example:
>>> report_ids = ts.get_correlated_report_ids(["wannacry", "www.evil.com"])
>>> print(report_ids)
["e3bc6921-e2c8-42eb-829e-eea8da2d3f36", "4d04804f-ff82-4a0b-8586-c42aef2f6f73"]
"""
params = {'indicators': indicators}
resp = self._client.get("reports/correlate", params=params)
return resp.json() |
def get_table_names_by_filter(self, dbname, filter, max_tables):
"""
Parameters:
- dbname
- filter
- max_tables
"""
self.send_get_table_names_by_filter(dbname, filter, max_tables)
return self.recv_get_table_names_by_filter() | Parameters:
- dbname
- filter
- max_tables | Below is the the instruction that describes the task:
### Input:
Parameters:
- dbname
- filter
- max_tables
### Response:
def get_table_names_by_filter(self, dbname, filter, max_tables):
"""
Parameters:
- dbname
- filter
- max_tables
"""
self.send_get_table_names_by_filter(dbname, filter, max_tables)
return self.recv_get_table_names_by_filter() |
def process_delimiter(self, char):
'''Process chars while not in a part'''
if char in self.whitespace:
return
if char in self.quote_chars:
# Store the quote type (' or ") and switch to quote processing.
self.inquote = char
self.process_char = self.process_quote
return
if char == self.eol_char:
self.complete = True
return
# Switch to processing a part.
self.process_char = self.process_part
self.process_char(char) | Process chars while not in a part | Below is the the instruction that describes the task:
### Input:
Process chars while not in a part
### Response:
def process_delimiter(self, char):
'''Process chars while not in a part'''
if char in self.whitespace:
return
if char in self.quote_chars:
# Store the quote type (' or ") and switch to quote processing.
self.inquote = char
self.process_char = self.process_quote
return
if char == self.eol_char:
self.complete = True
return
# Switch to processing a part.
self.process_char = self.process_part
self.process_char(char) |
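A stripped-down, self-contained illustration of the dispatch-on-state pattern that process_delimiter belongs to: the object swaps its process_char handler as it moves between "between parts" and "inside a part". The real class also tracks quotes, the EOL char and completion, none of which is reproduced here.

class TinyLexer(object):
    whitespace = ' \t'

    def __init__(self):
        self.parts = []
        self.current = ''
        self.process_char = self.process_delimiter    # start in the delimiter state

    def process_delimiter(self, char):
        if char in self.whitespace:
            return
        self.process_char = self.process_part          # switch state, re-dispatch the char
        self.process_char(char)

    def process_part(self, char):
        if char in self.whitespace:
            self.parts.append(self.current)
            self.current = ''
            self.process_char = self.process_delimiter
            return
        self.current += char

    def feed(self, line):
        for char in line:
            self.process_char(char)
        if self.current:
            self.parts.append(self.current)
        return self.parts

print(TinyLexer().feed('show  version now'))   # -> ['show', 'version', 'now']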
def get_user_blocks(session, user_text):
"""
Returns a list of blocks for a single user
"""
logger.debug("Getting user_blocks for {0}".format(user_text))
doc = session.get(action='query', list='blocks', bkusers=user_text,
bkprop=['id'])
return doc['query']['blocks'] | Returns a list of blocks for a single user | Below is the the instruction that describes the task:
### Input:
Returns a list of blocks for a single user
### Response:
def get_user_blocks(session, user_text):
"""
Returns a list of blocks for a single user
"""
logger.debug("Getting user_blocks for {0}".format(user_text))
doc = session.get(action='query', list='blocks', bkusers=user_text,
bkprop=['id'])
return doc['query']['blocks'] |
def random_peak_magnitudes(
log,
peakMagnitudeDistributions,
snTypesArray,
plot=True):
"""
*Generate a numpy array of random (distribution weighted) peak magnitudes for the given sn types.*
**Key Arguments:**
- ``log`` -- logger
- ``peakMagnitudeDistributions`` -- yaml style dictionary of peak magnitude distributions
- ``snTypesArray`` -- the pre-generated array of random sn types
- ``plot`` -- generate plot?
**Return:**
- None
"""
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
import matplotlib.pyplot as plt
import numpy as np
## LOCAL APPLICATION ##
################ >ACTION(S) ################
magDistributions = {}
for snType, peakMag in peakMagnitudeDistributions['magnitude'].iteritems():
sigma = peakMagnitudeDistributions['sigma'][snType]
magDistributions[snType] = [peakMag, sigma]
peakMagList = []
for item in snTypesArray:
thisPeak = magDistributions[item][
1] * np.random.randn() + magDistributions[item][0]
peakMagList.append(thisPeak)
peakMagArray = np.array(peakMagList)
# log.debug('peakMagArray %s' % (peakMagArray,))
return peakMagArray | *Generate a numpy array of random (distribution weighted) peak magnitudes for the given sn types.*
**Key Arguments:**
- ``log`` -- logger
- ``peakMagnitudeDistributions`` -- yaml style dictionary of peak magnitude distributions
- ``snTypesArray`` -- the pre-generated array of random sn types
- ``plot`` -- generate plot?
**Return:**
- None | Below is the the instruction that describes the task:
### Input:
*Generate a numpy array of random (distribution weighted) peak magnitudes for the given sn types.*
**Key Arguments:**
- ``log`` -- logger
- ``peakMagnitudeDistributions`` -- yaml style dictionary of peak magnitude distributions
- ``snTypesArray`` -- the pre-generated array of random sn types
- ``plot`` -- generate plot?
**Return:**
- None
### Response:
def random_peak_magnitudes(
log,
peakMagnitudeDistributions,
snTypesArray,
plot=True):
"""
*Generate a numpy array of random (distribution weighted) peak magnitudes for the given sn types.*
**Key Arguments:**
- ``log`` -- logger
- ``peakMagnitudeDistributions`` -- yaml style dictionary of peak magnitude distributions
- ``snTypesArray`` -- the pre-generated array of random sn types
- ``plot`` -- generate plot?
**Return:**
- None
"""
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
import matplotlib.pyplot as plt
import numpy as np
## LOCAL APPLICATION ##
################ >ACTION(S) ################
magDistributions = {}
for snType, peakMag in peakMagnitudeDistributions['magnitude'].iteritems():
sigma = peakMagnitudeDistributions['sigma'][snType]
magDistributions[snType] = [peakMag, sigma]
peakMagList = []
for item in snTypesArray:
thisPeak = magDistributions[item][
1] * np.random.randn() + magDistributions[item][0]
peakMagList.append(thisPeak)
peakMagArray = np.array(peakMagList)
# log.debug('peakMagArray %s' % (peakMagArray,))
return peakMagArray |
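A compact, self-contained version of the sampling idea above (Python 3 dict iteration instead of .iteritems()); the peak magnitudes, sigmas and type fractions are fabricated.

import numpy as np

peak_mu = {'Ia': -19.3, 'II': -17.0}                  # fabricated peak magnitudes
peak_sigma = {'Ia': 0.3, 'II': 1.0}                   # fabricated spreads
sn_types = np.random.choice(['Ia', 'II'], size=5, p=[0.4, 0.6])
peaks = np.array([peak_sigma[t] * np.random.randn() + peak_mu[t] for t in sn_types])
print(list(zip(sn_types, np.round(peaks, 2))))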
def open(cls, title, conn=None, google_user=None,
google_password=None):
""" Open the spreadsheet named ``title``. If no spreadsheet with
that name exists, a new one will be created. """
spreadsheet = cls.by_title(title, conn=conn, google_user=google_user,
google_password=google_password)
if spreadsheet is None:
spreadsheet = cls.create(title, conn=conn, google_user=google_user,
google_password=google_password)
return spreadsheet | Open the spreadsheet named ``title``. If no spreadsheet with
that name exists, a new one will be created. | Below is the the instruction that describes the task:
### Input:
Open the spreadsheet named ``title``. If no spreadsheet with
that name exists, a new one will be created.
### Response:
def open(cls, title, conn=None, google_user=None,
google_password=None):
""" Open the spreadsheet named ``title``. If no spreadsheet with
that name exists, a new one will be created. """
spreadsheet = cls.by_title(title, conn=conn, google_user=google_user,
google_password=google_password)
if spreadsheet is None:
spreadsheet = cls.create(title, conn=conn, google_user=google_user,
google_password=google_password)
return spreadsheet |
def dump_view(cls, request):
"""Dumps sitetrees with items using django-smuggler.
:param request:
:return:
"""
from smuggler.views import dump_to_response
return dump_to_response(request, [MODEL_TREE, MODEL_TREE_ITEM], filename_prefix='sitetrees') | Dumps sitetrees with items using django-smuggler.
:param request:
:return: | Below is the the instruction that describes the task:
### Input:
Dumps sitetrees with items using django-smuggler.
:param request:
:return:
### Response:
def dump_view(cls, request):
"""Dumps sitetrees with items using django-smuggler.
:param request:
:return:
"""
from smuggler.views import dump_to_response
return dump_to_response(request, [MODEL_TREE, MODEL_TREE_ITEM], filename_prefix='sitetrees') |
def cleanup(self):
"""
Clean up finished children.
:return: None
"""
self.lock.acquire()
logger.debug('Acquired lock in cleanup for ' + str(self))
self.children = [child for child in self.children if child.is_alive()]
self.lock.release() | Clean up finished children.
:return: None | Below is the the instruction that describes the task:
### Input:
Clean up finished children.
:return: None
### Response:
def cleanup(self):
"""
Clean up finished children.
:return: None
"""
self.lock.acquire()
logger.debug('Acquired lock in cleanup for ' + str(self))
self.children = [child for child in self.children if child.is_alive()]
self.lock.release() |
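A self-contained toy showing the prune-dead-children pattern used in cleanup above; the with-statement form of the lock is equivalent to the explicit acquire/release in the original.

import threading
import time

class Parent(object):
    def __init__(self):
        self.lock = threading.Lock()
        self.children = []

    def spawn(self, delay):
        t = threading.Thread(target=time.sleep, args=(delay,))
        t.start()
        self.children.append(t)

    def cleanup(self):
        with self.lock:    # same critical section as acquire()/release()
            self.children = [c for c in self.children if c.is_alive()]

p = Parent()
p.spawn(0.01)
p.spawn(1.0)
time.sleep(0.1)
p.cleanup()
print(len(p.children))     # -> 1: only the still-running child remains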
def commit(self, msg):
"""
Commit outstanding data changes
"""
self.logger.info('Commit config: {}'.format(msg))
with Dir(self.data_path):
self.cmd.check_assert('git add .')
self.cmd.check_assert('git commit --allow-empty -m "{}"'.format(msg)) | Commit outstanding data changes | Below is the the instruction that describes the task:
### Input:
Commit outstanding data changes
### Response:
def commit(self, msg):
"""
Commit outstanding data changes
"""
self.logger.info('Commit config: {}'.format(msg))
with Dir(self.data_path):
self.cmd.check_assert('git add .')
self.cmd.check_assert('git commit --allow-empty -m "{}"'.format(msg)) |
def create_dcnm_out_nwk(self, tenant_id, fw_dict, is_fw_virt=False):
"""Create the DCNM OUT Network and update the result. """
tenant_name = fw_dict.get('tenant_name')
ret = self._create_service_nwk(tenant_id, tenant_name, 'out')
if ret:
res = fw_const.DCNM_OUT_NETWORK_CREATE_SUCCESS
LOG.info("out Service network created for tenant %s",
tenant_id)
else:
res = fw_const.DCNM_OUT_NETWORK_CREATE_FAIL
LOG.info("out Service network create failed for tenant %s",
tenant_id)
self.update_fw_db_result(tenant_id, dcnm_status=res)
return ret | Create the DCNM OUT Network and update the result. | Below is the the instruction that describes the task:
### Input:
Create the DCNM OUT Network and update the result.
### Response:
def create_dcnm_out_nwk(self, tenant_id, fw_dict, is_fw_virt=False):
"""Create the DCNM OUT Network and update the result. """
tenant_name = fw_dict.get('tenant_name')
ret = self._create_service_nwk(tenant_id, tenant_name, 'out')
if ret:
res = fw_const.DCNM_OUT_NETWORK_CREATE_SUCCESS
LOG.info("out Service network created for tenant %s",
tenant_id)
else:
res = fw_const.DCNM_OUT_NETWORK_CREATE_FAIL
LOG.info("out Service network create failed for tenant %s",
tenant_id)
self.update_fw_db_result(tenant_id, dcnm_status=res)
return ret |
def iter_format_block(
self, text=None,
width=60, chars=False, fill=False, newlines=False,
append=None, prepend=None, strip_first=False, strip_last=False,
lstrip=False):
""" Iterate over lines in a formatted block of text.
This iterator allows you to prepend to each line.
For basic blocks see iter_block().
Arguments:
text : String to format.
width : Maximum width for each line. The prepend string
is not included in this calculation.
Default: 60
chars : Whether to wrap on characters instead of spaces.
Default: False
fill : Insert spaces between words so that each line is
the same width. This overrides `chars`.
Default: False
newlines : Whether to preserve newlines in the original
string.
Default: False
append : String to append after each line.
prepend : String to prepend before each line.
strip_first : Whether to omit the prepend string for the first
line.
Default: False
Example (when using prepend='$'):
Without strip_first -> '$this', '$that'
With strip_first -> 'this', '$that'
strip_last : Whether to omit the append string for the last
line (like strip_first does for prepend).
Default: False
lstrip : Whether to remove leading spaces from each line.
This doesn't include any spaces in `prepend`.
Default: False
"""
if fill:
chars = False
iterlines = self.iter_block(
(self.text if text is None else text) or '',
width=width,
chars=chars,
newlines=newlines,
lstrip=lstrip,
)
if not (prepend or append):
# Shortcut some of the logic below when not prepending/appending.
if fill:
yield from (
self.expand_words(l, width=width) for l in iterlines
)
else:
yield from iterlines
else:
# Prepend, append, or both prepend/append to each line.
if prepend:
prependlen = len(prepend)
else:
# No prepend, stripping not necessary and shouldn't be tried.
strip_first = False
prependlen = 0
if append:
# Unfortunately appending means exhausting the generator.
# I don't know where the last line is if I don't.
lines = list(iterlines)
lasti = len(lines) - 1
iterlines = (l for l in lines)
appendlen = len(append)
else:
# No append, stripping not necessary and shouldn't be tried.
strip_last = False
appendlen = 0
lasti = -1
for i, l in enumerate(self.iter_add_text(
iterlines,
prepend=prepend,
append=append)):
if strip_first and (i == 0):
# Strip the prepend that iter_add_text() added.
l = l[prependlen:]
elif strip_last and (i == lasti):
# Strip the append that iter_add_text() added.
l = l[:-appendlen]
if fill:
yield self.expand_words(l, width=width)
else:
yield l | Iterate over lines in a formatted block of text.
This iterator allows you to prepend to each line.
For basic blocks see iter_block().
Arguments:
text : String to format.
width : Maximum width for each line. The prepend string
is not included in this calculation.
Default: 60
chars : Whether to wrap on characters instead of spaces.
Default: False
fill : Insert spaces between words so that each line is
the same width. This overrides `chars`.
Default: False
newlines : Whether to preserve newlines in the original
string.
Default: False
append : String to append after each line.
prepend : String to prepend before each line.
strip_first : Whether to omit the prepend string for the first
line.
Default: False
Example (when using prepend='$'):
Without strip_first -> '$this', '$that'
With strip_first -> 'this', '$that'
strip_last : Whether to omit the append string for the last
line (like strip_first does for prepend).
Default: False
lstrip : Whether to remove leading spaces from each line.
This doesn't include any spaces in `prepend`.
Default: False | Below is the the instruction that describes the task:
### Input:
Iterate over lines in a formatted block of text.
This iterator allows you to prepend to each line.
For basic blocks see iter_block().
Arguments:
text : String to format.
width : Maximum width for each line. The prepend string
is not included in this calculation.
Default: 60
chars : Whether to wrap on characters instead of spaces.
Default: False
fill : Insert spaces between words so that each line is
the same width. This overrides `chars`.
Default: False
newlines : Whether to preserve newlines in the original
string.
Default: False
append : String to append after each line.
prepend : String to prepend before each line.
strip_first : Whether to omit the prepend string for the first
line.
Default: False
Example (when using prepend='$'):
Without strip_first -> '$this', '$that'
With strip_first -> 'this', '$that'
strip_last : Whether to omit the append string for the last
line (like strip_first does for prepend).
Default: False
lstrip : Whether to remove leading spaces from each line.
This doesn't include any spaces in `prepend`.
Default: False
### Response:
def iter_format_block(
self, text=None,
width=60, chars=False, fill=False, newlines=False,
append=None, prepend=None, strip_first=False, strip_last=False,
lstrip=False):
""" Iterate over lines in a formatted block of text.
This iterator allows you to prepend to each line.
For basic blocks see iter_block().
Arguments:
text : String to format.
width : Maximum width for each line. The prepend string
is not included in this calculation.
Default: 60
chars : Whether to wrap on characters instead of spaces.
Default: False
fill : Insert spaces between words so that each line is
the same width. This overrides `chars`.
Default: False
newlines : Whether to preserve newlines in the original
string.
Default: False
append : String to append after each line.
prepend : String to prepend before each line.
strip_first : Whether to omit the prepend string for the first
line.
Default: False
Example (when using prepend='$'):
Without strip_first -> '$this', '$that'
With strip_first -> 'this', '$that'
strip_last : Whether to omit the append string for the last
line (like strip_first does for prepend).
Default: False
lstrip : Whether to remove leading spaces from each line.
This doesn't include any spaces in `prepend`.
Default: False
"""
if fill:
chars = False
iterlines = self.iter_block(
(self.text if text is None else text) or '',
width=width,
chars=chars,
newlines=newlines,
lstrip=lstrip,
)
if not (prepend or append):
# Shortcut some of the logic below when not prepending/appending.
if fill:
yield from (
self.expand_words(l, width=width) for l in iterlines
)
else:
yield from iterlines
else:
# Prepend, append, or both prepend/append to each line.
if prepend:
prependlen = len(prepend)
else:
# No prepend, stripping not necessary and shouldn't be tried.
strip_first = False
prependlen = 0
if append:
# Unfortunately appending means exhausting the generator.
# I don't know where the last line is if I don't.
lines = list(iterlines)
lasti = len(lines) - 1
iterlines = (l for l in lines)
appendlen = len(append)
else:
# No append, stripping not necessary and shouldn't be tried.
strip_last = False
appendlen = 0
lasti = -1
for i, l in enumerate(self.iter_add_text(
iterlines,
prepend=prepend,
append=append)):
if strip_first and (i == 0):
# Strip the prepend that iter_add_text() added.
l = l[prependlen:]
elif strip_last and (i == lasti):
# Strip the append that iter_add_text() added.
l = l[:-appendlen]
if fill:
yield self.expand_words(l, width=width)
else:
yield l |
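A standalone illustration of the prepend/strip_first behaviour documented above, using textwrap in place of the class's own iter_block(); it mimics the semantics rather than calling the original method.

import textwrap

text = 'this that other thing and a few more words to wrap'
lines = textwrap.wrap(text, width=20)
prepend = '$ '
# strip_first=True: every line gets the prepend except the first one.
out = [line if i == 0 else prepend + line for i, line in enumerate(lines)]
print('\n'.join(out))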
def run(self):
"""Start the server on the local IP at port 1400 (default).
Handling of requests is delegated to an instance of the
`EventNotifyHandler` class.
"""
listener = EventServer(self.address, EventNotifyHandler)
log.info("Event listener running on %s", listener.server_address)
# Listen for events until told to stop
while not self.stop_flag.is_set():
listener.handle_request() | Start the server on the local IP at port 1400 (default).
Handling of requests is delegated to an instance of the
`EventNotifyHandler` class. | Below is the the instruction that describes the task:
### Input:
Start the server on the local IP at port 1400 (default).
Handling of requests is delegated to an instance of the
`EventNotifyHandler` class.
### Response:
def run(self):
"""Start the server on the local IP at port 1400 (default).
Handling of requests is delegated to an instance of the
`EventNotifyHandler` class.
"""
listener = EventServer(self.address, EventNotifyHandler)
log.info("Event listener running on %s", listener.server_address)
# Listen for events until told to stop
while not self.stop_flag.is_set():
listener.handle_request() |
def stripped_lib_dict(lib_dict, strip_prefix):
""" Return `lib_dict` with `strip_prefix` removed from start of paths
Use to give form of `lib_dict` that appears relative to some base path
given by `strip_prefix`. Particularly useful for analyzing wheels where we
unpack to a temporary path before analyzing.
Parameters
----------
lib_dict : dict
See :func:`tree_libs` for definition. All depending and depended paths
are canonical (therefore absolute)
strip_prefix : str
Prefix to remove (if present) from all depended and depending library
paths in `lib_dict`
Returns
-------
relative_dict : dict
`lib_dict` with `strip_prefix` removed from beginning of all depended
and depending library paths.
"""
relative_dict = {}
stripper = get_prefix_stripper(strip_prefix)
for lib_path, dependings_dict in lib_dict.items():
ding_dict = {}
for depending_libpath, install_name in dependings_dict.items():
ding_dict[stripper(depending_libpath)] = install_name
relative_dict[stripper(lib_path)] = ding_dict
return relative_dict | Return `lib_dict` with `strip_prefix` removed from start of paths
Use to give form of `lib_dict` that appears relative to some base path
given by `strip_prefix`. Particularly useful for analyzing wheels where we
unpack to a temporary path before analyzing.
Parameters
----------
lib_dict : dict
See :func:`tree_libs` for definition. All depending and depended paths
are canonical (therefore absolute)
strip_prefix : str
Prefix to remove (if present) from all depended and depending library
paths in `lib_dict`
Returns
-------
relative_dict : dict
`lib_dict` with `strip_prefix` removed from beginning of all depended
and depending library paths. | Below is the the instruction that describes the task:
### Input:
Return `lib_dict` with `strip_prefix` removed from start of paths
Use to give form of `lib_dict` that appears relative to some base path
given by `strip_prefix`. Particularly useful for analyzing wheels where we
unpack to a temporary path before analyzing.
Parameters
----------
lib_dict : dict
See :func:`tree_libs` for definition. All depending and depended paths
are canonical (therefore absolute)
strip_prefix : str
Prefix to remove (if present) from all depended and depending library
paths in `lib_dict`
Returns
-------
relative_dict : dict
`lib_dict` with `strip_prefix` removed from beginning of all depended
and depending library paths.
### Response:
def stripped_lib_dict(lib_dict, strip_prefix):
""" Return `lib_dict` with `strip_prefix` removed from start of paths
Use to give form of `lib_dict` that appears relative to some base path
given by `strip_prefix`. Particularly useful for analyzing wheels where we
unpack to a temporary path before analyzing.
Parameters
----------
lib_dict : dict
See :func:`tree_libs` for definition. All depending and depended paths
are canonical (therefore absolute)
strip_prefix : str
Prefix to remove (if present) from all depended and depending library
paths in `lib_dict`
Returns
-------
relative_dict : dict
`lib_dict` with `strip_prefix` removed from beginning of all depended
and depending library paths.
"""
relative_dict = {}
stripper = get_prefix_stripper(strip_prefix)
for lib_path, dependings_dict in lib_dict.items():
ding_dict = {}
for depending_libpath, install_name in dependings_dict.items():
ding_dict[stripper(depending_libpath)] = install_name
relative_dict[stripper(lib_path)] = ding_dict
return relative_dict |
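A minimal usage sketch for stripped_lib_dict; the sample wheel paths are invented and the inline get_prefix_stripper only stands in for the real helper (assumed to drop the prefix when present):

def get_prefix_stripper(strip_prefix):
    # Assumed behaviour of the real helper: strip the prefix if the path starts with it.
    n = len(strip_prefix)
    return lambda path: path[n:] if path.startswith(strip_prefix) else path

lib_dict = {
    '/tmp/wheel/pkg/.dylibs/libfoo.dylib': {
        '/tmp/wheel/pkg/_ext.so': '@loader_path/.dylibs/libfoo.dylib',
    },
}
print(stripped_lib_dict(lib_dict, '/tmp/wheel/'))
# {'pkg/.dylibs/libfoo.dylib': {'pkg/_ext.so': '@loader_path/.dylibs/libfoo.dylib'}}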
def posix_rename(self, oldpath, newpath):
"""
Rename a file or folder from ``oldpath`` to ``newpath``, following
posix conventions.
:param str oldpath: existing name of the file or folder
:param str newpath: new name for the file or folder, will be
overwritten if it already exists
:raises:
``IOError`` -- if ``newpath`` is a folder, posix-rename is not
supported by the server or something else goes wrong
:versionadded: 2.2
"""
oldpath = self._adjust_cwd(oldpath)
newpath = self._adjust_cwd(newpath)
self._log(DEBUG, "posix_rename({!r}, {!r})".format(oldpath, newpath))
self._request(
            CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath
) | Rename a file or folder from ``oldpath`` to ``newpath``, following
posix conventions.
:param str oldpath: existing name of the file or folder
:param str newpath: new name for the file or folder, will be
overwritten if it already exists
:raises:
``IOError`` -- if ``newpath`` is a folder, posix-rename is not
supported by the server or something else goes wrong
:versionadded: 2.2 | Below is the the instruction that describes the task:
### Input:
Rename a file or folder from ``oldpath`` to ``newpath``, following
posix conventions.
:param str oldpath: existing name of the file or folder
:param str newpath: new name for the file or folder, will be
overwritten if it already exists
:raises:
``IOError`` -- if ``newpath`` is a folder, posix-rename is not
supported by the server or something else goes wrong
:versionadded: 2.2
### Response:
def posix_rename(self, oldpath, newpath):
"""
Rename a file or folder from ``oldpath`` to ``newpath``, following
posix conventions.
:param str oldpath: existing name of the file or folder
:param str newpath: new name for the file or folder, will be
overwritten if it already exists
:raises:
``IOError`` -- if ``newpath`` is a folder, posix-rename is not
supported by the server or something else goes wrong
:versionadded: 2.2
"""
oldpath = self._adjust_cwd(oldpath)
newpath = self._adjust_cwd(newpath)
self._log(DEBUG, "posix_rename({!r}, {!r})".format(oldpath, newpath))
self._request(
            CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath
) |
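A hedged usage sketch of posix_rename with paramiko; the host, credentials and paths are placeholders:

import paramiko

transport = paramiko.Transport(('sftp.example.com', 22))
transport.connect(username='user', password='secret')
sftp = paramiko.SFTPClient.from_transport(transport)
# Overwrites /tmp/new.txt atomically if it already exists.
sftp.posix_rename('/tmp/old.txt', '/tmp/new.txt')
transport.close()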
def _url_parts(self, url_suffix, data, content_type):
"""
        Format the url data based on content_type.
:param url_suffix: str URL path we are sending a GET/POST/PUT to
:param data: object data we are sending
:param content_type: str from ContentType that determines how we format the data
:return: complete url, formatted data, and headers for sending
"""
url = self.base_url + url_suffix
send_data = data
if content_type == ContentType.json:
send_data = json.dumps(data)
headers = {
'Content-Type': content_type,
'User-Agent': self.user_agent_str,
}
if self.auth:
headers['Authorization'] = self.auth.get_auth()
        return url, send_data, headers | Format the url data based on content_type.
:param url_suffix: str URL path we are sending a GET/POST/PUT to
:param data: object data we are sending
:param content_type: str from ContentType that determines how we format the data
:return: complete url, formatted data, and headers for sending | Below is the the instruction that describes the task:
### Input:
Format the url data based on content_type.
:param url_suffix: str URL path we are sending a GET/POST/PUT to
:param data: object data we are sending
:param content_type: str from ContentType that determines how we format the data
:return: complete url, formatted data, and headers for sending
### Response:
def _url_parts(self, url_suffix, data, content_type):
"""
        Format the url data based on content_type.
:param url_suffix: str URL path we are sending a GET/POST/PUT to
:param data: object data we are sending
:param content_type: str from ContentType that determines how we format the data
:return: complete url, formatted data, and headers for sending
"""
url = self.base_url + url_suffix
send_data = data
if content_type == ContentType.json:
send_data = json.dumps(data)
headers = {
'Content-Type': content_type,
'User-Agent': self.user_agent_str,
}
if self.auth:
headers['Authorization'] = self.auth.get_auth()
return url, send_data, headers |
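A standalone sketch of what the json branch produces; the literal values stand in for the class attributes (base_url, user_agent_str) and for ContentType.json, none of which are shown here:

import json

base_url = 'https://api.example.com'      # stands in for self.base_url
content_type = 'application/json'         # stands in for ContentType.json
data = {'name': 'demo'}

url = base_url + '/projects'
send_data = json.dumps(data) if content_type == 'application/json' else data
headers = {'Content-Type': content_type, 'User-Agent': 'example-agent/1.0'}
print(url, send_data, headers)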
def mutate(self, node, index):
"""Modify the For loop to evaluate to None"""
assert index == 0
assert isinstance(node, ForStmt)
empty_list = parso.parse(' []')
node.children[3] = empty_list
return node | Modify the For loop to evaluate to None | Below is the the instruction that describes the task:
### Input:
Modify the For loop to evaluate to None
### Response:
def mutate(self, node, index):
"""Modify the For loop to evaluate to None"""
assert index == 0
assert isinstance(node, ForStmt)
empty_list = parso.parse(' []')
node.children[3] = empty_list
return node |
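A standalone sketch of the same splice done directly with parso, outside the mutation-operator class; the sample source and the printed result are illustrative:

import parso

tree = parso.parse("for x in range(3):\n    print(x)\n")
for_stmt = tree.children[0]                # the ForStmt node
for_stmt.children[3] = parso.parse(' []')  # replace the iterable with an empty list
print(tree.get_code())                     # roughly: for x in []: ...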
def addSourceAddr(self, addr):
"""None means 'system default'"""
try:
self._multiInSocket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, self._makeMreq(addr))
except socket.error: # if 1 interface has more than 1 address, exception is raised for the second
pass
sock = self._createMulticastOutSocket(addr, self._observer.ttl)
self._multiOutUniInSockets[addr] = sock
self._poll.register(sock, select.POLLIN) | None means 'system default | Below is the the instruction that describes the task:
### Input:
None means 'system default
### Response:
def addSourceAddr(self, addr):
"""None means 'system default'"""
try:
self._multiInSocket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, self._makeMreq(addr))
except socket.error: # if 1 interface has more than 1 address, exception is raised for the second
pass
sock = self._createMulticastOutSocket(addr, self._observer.ttl)
self._multiOutUniInSockets[addr] = sock
self._poll.register(sock, select.POLLIN) |
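A standalone sketch of the IP_ADD_MEMBERSHIP join that _makeMreq presumably packs; the multicast group and interface addresses are examples:

import socket
import struct

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# mreq = 4-byte multicast group address followed by 4-byte local interface address.
mreq = struct.pack('4s4s', socket.inet_aton('239.255.255.250'), socket.inet_aton('0.0.0.0'))
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)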
def _enqueue(self, msg):
"""Push a new `msg` onto the queue, return `(success, msg)`"""
self.log.debug('queueing: %s', msg)
if self.queue.full():
self.log.warn('librato_bg queue is full')
return False, msg
self.queue.put(msg)
self.log.debug('enqueued %s.', msg)
return True, msg | Push a new `msg` onto the queue, return `(success, msg)` | Below is the the instruction that describes the task:
### Input:
Push a new `msg` onto the queue, return `(success, msg)`
### Response:
def _enqueue(self, msg):
"""Push a new `msg` onto the queue, return `(success, msg)`"""
self.log.debug('queueing: %s', msg)
if self.queue.full():
self.log.warn('librato_bg queue is full')
return False, msg
self.queue.put(msg)
self.log.debug('enqueued %s.', msg)
return True, msg |
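A standalone sketch of the same bounded-queue guard using the stdlib queue module:

import queue

q = queue.Queue(maxsize=2)

def enqueue(msg):
    if q.full():
        return False, msg   # dropped, mirroring the "queue is full" branch above
    q.put(msg)
    return True, msg

print(enqueue('a'), enqueue('b'), enqueue('c'))   # the third message is rejected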
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request."""
_debug("extract_cookies: %s", response.info())
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
for cookie in self.make_cookies(response, request):
if self._policy.set_ok(cookie, request):
_debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
finally:
self._cookies_lock.release() | Extract cookies from response, where allowable given the request. | Below is the the instruction that describes the task:
### Input:
Extract cookies from response, where allowable given the request.
### Response:
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request."""
_debug("extract_cookies: %s", response.info())
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
for cookie in self.make_cookies(response, request):
if self._policy.set_ok(cookie, request):
_debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
finally:
self._cookies_lock.release() |
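A hedged usage sketch with the stdlib cookie jar, whose HTTPCookieProcessor calls extract_cookies() on every response; the URL is a placeholder and the call needs network access:

import urllib.request
from http.cookiejar import CookieJar

jar = CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
opener.open('http://example.com/')    # extract_cookies() runs against this response
for cookie in jar:
    print(cookie.name, cookie.value)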
def setup_path():
"""Sets up the python include paths to include src"""
import os.path; import sys
if sys.argv[0]:
top_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path = [os.path.join(top_dir, "src")] + sys.path
pass
return | Sets up the python include paths to include src | Below is the the instruction that describes the task:
### Input:
Sets up the python include paths to include src
### Response:
def setup_path():
"""Sets up the python include paths to include src"""
import os.path; import sys
if sys.argv[0]:
top_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path = [os.path.join(top_dir, "src")] + sys.path
pass
return |
def init_crash_handler(self):
"""Create a crash handler, typically setting sys.excepthook to it."""
self.crash_handler = self.crash_handler_class(self)
sys.excepthook = self.excepthook
def unset_crashhandler():
sys.excepthook = sys.__excepthook__
atexit.register(unset_crashhandler) | Create a crash handler, typically setting sys.excepthook to it. | Below is the the instruction that describes the task:
### Input:
Create a crash handler, typically setting sys.excepthook to it.
### Response:
def init_crash_handler(self):
"""Create a crash handler, typically setting sys.excepthook to it."""
self.crash_handler = self.crash_handler_class(self)
sys.excepthook = self.excepthook
def unset_crashhandler():
sys.excepthook = sys.__excepthook__
atexit.register(unset_crashhandler) |
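A standalone sketch of the same pattern: install a custom excepthook and restore the default at exit; the handler body is illustrative:

import atexit
import sys

def crash_handler(exc_type, exc_value, tb):
    print('unhandled exception:', exc_type.__name__, exc_value)

sys.excepthook = crash_handler
atexit.register(lambda: setattr(sys, 'excepthook', sys.__excepthook__))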
def _compile(pattern, flags):
"""Compile the pattern to regex."""
return re.compile(WcParse(pattern, flags & FLAG_MASK).parse()) | Compile the pattern to regex. | Below is the the instruction that describes the task:
### Input:
Compile the pattern to regex.
### Response:
def _compile(pattern, flags):
"""Compile the pattern to regex."""
return re.compile(WcParse(pattern, flags & FLAG_MASK).parse()) |
def get_codeblock_node(self, code, language):
"""this is copied from sphinx.directives.code.CodeBlock.run
    it has been changed to accept code and language as arguments instead
of reading from self
"""
# type: () -> List[nodes.Node]
document = self.state.document
location = self.state_machine.get_source_and_line(self.lineno)
linespec = self.options.get('emphasize-lines')
if linespec:
try:
nlines = len(code.split('\n'))
hl_lines = parselinenos(linespec, nlines)
if any(i >= nlines for i in hl_lines):
log.warning(__('line number spec is out of range(1-%d): %r') %
(nlines, self.options['emphasize-lines']),
location=location)
hl_lines = [x + 1 for x in hl_lines if x < nlines]
except ValueError as err:
return [document.reporter.warning(str(err), line=self.lineno)]
else:
hl_lines = None
if 'dedent' in self.options:
location = self.state_machine.get_source_and_line(self.lineno)
lines = code.split('\n')
lines = dedent_lines(lines, self.options['dedent'], location=location)
code = '\n'.join(lines)
literal = nodes.literal_block(code, code)
literal['language'] = language
literal['linenos'] = 'linenos' in self.options or \
'lineno-start' in self.options
literal['classes'] += self.options.get('class', [])
extra_args = literal['highlight_args'] = {}
if hl_lines is not None:
extra_args['hl_lines'] = hl_lines
if 'lineno-start' in self.options:
extra_args['linenostart'] = self.options['lineno-start']
set_source_info(self, literal)
caption = self.options.get('caption')
if caption:
try:
literal = container_wrapper(self, literal, caption)
except ValueError as exc:
return [document.reporter.warning(text_type(exc), line=self.lineno)]
# literal will be note_implicit_target that is linked from caption and numref.
# when options['name'] is provided, it should be primary ID.
self.add_name(literal)
return [literal] | this is copied from sphinx.directives.code.CodeBlock.run
    it has been changed to accept code and language as arguments instead
of reading from self | Below is the the instruction that describes the task:
### Input:
this is copied from sphinx.directives.code.CodeBlock.run
    it has been changed to accept code and language as arguments instead
of reading from self
### Response:
def get_codeblock_node(self, code, language):
"""this is copied from sphinx.directives.code.CodeBlock.run
    it has been changed to accept code and language as arguments instead
of reading from self
"""
# type: () -> List[nodes.Node]
document = self.state.document
location = self.state_machine.get_source_and_line(self.lineno)
linespec = self.options.get('emphasize-lines')
if linespec:
try:
nlines = len(code.split('\n'))
hl_lines = parselinenos(linespec, nlines)
if any(i >= nlines for i in hl_lines):
log.warning(__('line number spec is out of range(1-%d): %r') %
(nlines, self.options['emphasize-lines']),
location=location)
hl_lines = [x + 1 for x in hl_lines if x < nlines]
except ValueError as err:
return [document.reporter.warning(str(err), line=self.lineno)]
else:
hl_lines = None
if 'dedent' in self.options:
location = self.state_machine.get_source_and_line(self.lineno)
lines = code.split('\n')
lines = dedent_lines(lines, self.options['dedent'], location=location)
code = '\n'.join(lines)
literal = nodes.literal_block(code, code)
literal['language'] = language
literal['linenos'] = 'linenos' in self.options or \
'lineno-start' in self.options
literal['classes'] += self.options.get('class', [])
extra_args = literal['highlight_args'] = {}
if hl_lines is not None:
extra_args['hl_lines'] = hl_lines
if 'lineno-start' in self.options:
extra_args['linenostart'] = self.options['lineno-start']
set_source_info(self, literal)
caption = self.options.get('caption')
if caption:
try:
literal = container_wrapper(self, literal, caption)
except ValueError as exc:
return [document.reporter.warning(text_type(exc), line=self.lineno)]
# literal will be note_implicit_target that is linked from caption and numref.
# when options['name'] is provided, it should be primary ID.
self.add_name(literal)
return [literal] |
def create_dataset(self, dataset, exists_ok=False, retry=DEFAULT_RETRY):
"""API call: create the dataset via a POST request.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/insert
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A :class:`~google.cloud.bigquery.dataset.Dataset` to create.
If ``dataset`` is a reference, an empty dataset is created
with the specified ID and client's default location.
exists_ok (bool):
Defaults to ``False``. If ``True``, ignore "already exists"
errors when creating the dataset.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
A new ``Dataset`` returned from the API.
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> dataset = bigquery.Dataset(client.dataset('my_dataset'))
>>> dataset = client.create_dataset(dataset)
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if isinstance(dataset, DatasetReference):
dataset = Dataset(dataset)
path = "/projects/%s/datasets" % (dataset.project,)
data = dataset.to_api_repr()
if data.get("location") is None and self.location is not None:
data["location"] = self.location
try:
api_response = self._call_api(retry, method="POST", path=path, data=data)
return Dataset.from_api_repr(api_response)
except google.api_core.exceptions.Conflict:
if not exists_ok:
raise
return self.get_dataset(dataset.reference, retry=retry) | API call: create the dataset via a POST request.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/insert
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A :class:`~google.cloud.bigquery.dataset.Dataset` to create.
If ``dataset`` is a reference, an empty dataset is created
with the specified ID and client's default location.
exists_ok (bool):
Defaults to ``False``. If ``True``, ignore "already exists"
errors when creating the dataset.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
A new ``Dataset`` returned from the API.
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> dataset = bigquery.Dataset(client.dataset('my_dataset'))
>>> dataset = client.create_dataset(dataset) | Below is the the instruction that describes the task:
### Input:
API call: create the dataset via a POST request.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/insert
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A :class:`~google.cloud.bigquery.dataset.Dataset` to create.
If ``dataset`` is a reference, an empty dataset is created
with the specified ID and client's default location.
exists_ok (bool):
Defaults to ``False``. If ``True``, ignore "already exists"
errors when creating the dataset.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
A new ``Dataset`` returned from the API.
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> dataset = bigquery.Dataset(client.dataset('my_dataset'))
>>> dataset = client.create_dataset(dataset)
### Response:
def create_dataset(self, dataset, exists_ok=False, retry=DEFAULT_RETRY):
"""API call: create the dataset via a POST request.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/insert
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A :class:`~google.cloud.bigquery.dataset.Dataset` to create.
If ``dataset`` is a reference, an empty dataset is created
with the specified ID and client's default location.
exists_ok (bool):
Defaults to ``False``. If ``True``, ignore "already exists"
errors when creating the dataset.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
A new ``Dataset`` returned from the API.
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> dataset = bigquery.Dataset(client.dataset('my_dataset'))
>>> dataset = client.create_dataset(dataset)
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if isinstance(dataset, DatasetReference):
dataset = Dataset(dataset)
path = "/projects/%s/datasets" % (dataset.project,)
data = dataset.to_api_repr()
if data.get("location") is None and self.location is not None:
data["location"] = self.location
try:
api_response = self._call_api(retry, method="POST", path=path, data=data)
return Dataset.from_api_repr(api_response)
except google.api_core.exceptions.Conflict:
if not exists_ok:
raise
return self.get_dataset(dataset.reference, retry=retry) |
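A hedged sketch of the exists_ok path; the project and dataset IDs are placeholders and the call requires Google Cloud credentials:

from google.cloud import bigquery

client = bigquery.Client(project='my-project')
dataset = bigquery.Dataset('my-project.my_dataset')
dataset.location = 'US'
# With exists_ok=True a Conflict from the API is swallowed and the existing dataset is returned.
dataset = client.create_dataset(dataset, exists_ok=True)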
def make_link_node(rawtext, app, name, options):
"""
Create a link to the TL reference.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param name: Name of the object to link to
:param options: Options dictionary passed to role func.
"""
try:
base = app.config.tl_ref_url
if not base:
raise AttributeError
except AttributeError as e:
raise ValueError('tl_ref_url config value is not set') from e
if base[-1] != '/':
base += '/'
set_classes(options)
node = nodes.reference(rawtext, utils.unescape(name),
refuri='{}?q={}'.format(base, name),
**options)
return node | Create a link to the TL reference.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param name: Name of the object to link to
:param options: Options dictionary passed to role func. | Below is the the instruction that describes the task:
### Input:
Create a link to the TL reference.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param name: Name of the object to link to
:param options: Options dictionary passed to role func.
### Response:
def make_link_node(rawtext, app, name, options):
"""
Create a link to the TL reference.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param name: Name of the object to link to
:param options: Options dictionary passed to role func.
"""
try:
base = app.config.tl_ref_url
if not base:
raise AttributeError
except AttributeError as e:
raise ValueError('tl_ref_url config value is not set') from e
if base[-1] != '/':
base += '/'
set_classes(options)
node = nodes.reference(rawtext, utils.unescape(name),
refuri='{}?q={}'.format(base, name),
**options)
return node |
def get_edges_from_parsed_data(parsed_data):
""" Taking into account fragment type (circular|linear) and retrieved gene order information translates adjacencies between blocks into edges for addition to the :class:`bg.breakpoint_graph.BreakpointGraph`
In case supplied fragment is linear (``$``) special artificial vertices (with ``__infinity`` suffix) are introduced to denote fragment extremities
:param parsed_data: (``$`` | ``@``, [(``+`` | ``-``, block_name),...]) formatted data about fragment type and ordered list of oriented blocks
:type parsed_data: ``tuple(str, list((str, str), ...))``
:return: a list of vertices pairs that would correspond to edges in :class:`bg.breakpoint_graph.BreakpointGraph`
:rtype: ``list((str, str), ...)``
"""
chr_type, blocks = parsed_data
vertices = []
for block in blocks:
###############################################################################################
#
# each block is represented as a pair of vertices (that correspond to block extremities)
#
###############################################################################################
v1, v2 = GRIMMReader.__assign_vertex_pair(block)
vertices.append(v1)
vertices.append(v2)
if chr_type == "@":
###############################################################################################
#
# if we parse a circular genomic fragment we must introduce an additional pair of vertices (edge)
# that would connect two outer most vertices in the vertex list, thus connecting fragment extremities
#
###############################################################################################
vertex = vertices.pop()
vertices.insert(0, vertex)
elif chr_type == "$":
###############################################################################################
#
# if we parse linear genomic fragment, we introduce two artificial (infinity) vertices
# that correspond to fragments ends, and introduce edges between them and respective outermost block vertices
#
# if outermost vertices at this moment are repeat vertices, the outermost pair shall be discarded and the innermost
# vertex info shall be utilized in the infinity vertex, that is introduced for the fragment extremity
#
###############################################################################################
if vertices[0].is_repeat_vertex:
left_iv_tags = sorted([(tag, value) if tag != "repeat" else (tag, BGVertex.get_vertex_name_root(vertices[1].name))
for tag, value in vertices[1].tags])
left_iv_root_name = BGVertex.get_vertex_name_root(vertices[2].name)
vertices = vertices[2:]
else:
left_iv_tags = []
left_iv_root_name = vertices[0].name
if vertices[-1].is_repeat_vertex:
right_iv_tags = sorted(
[(tag, value) if tag != "repeat" else (tag, BGVertex.get_vertex_name_root(vertices[-2].name))
for tag, value in vertices[-2].tags])
right_iv_root_name = BGVertex.get_vertex_name_root(vertices[-3].name)
vertices = vertices[:-2]
else:
right_iv_tags = []
right_iv_root_name = BGVertex.get_vertex_name_root(vertices[-1].name)
left_iv, right_iv = TaggedInfinityVertex(left_iv_root_name), TaggedInfinityVertex(right_iv_root_name)
left_iv.tags = left_iv_tags
right_iv.tags = right_iv_tags
vertices.insert(0, left_iv)
vertices.append(right_iv)
return [(v1, v2) for v1, v2 in zip(vertices[::2], vertices[1::2])] | Taking into account fragment type (circular|linear) and retrieved gene order information translates adjacencies between blocks into edges for addition to the :class:`bg.breakpoint_graph.BreakpointGraph`
In case supplied fragment is linear (``$``) special artificial vertices (with ``__infinity`` suffix) are introduced to denote fragment extremities
:param parsed_data: (``$`` | ``@``, [(``+`` | ``-``, block_name),...]) formatted data about fragment type and ordered list of oriented blocks
:type parsed_data: ``tuple(str, list((str, str), ...))``
:return: a list of vertices pairs that would correspond to edges in :class:`bg.breakpoint_graph.BreakpointGraph`
:rtype: ``list((str, str), ...)`` | Below is the the instruction that describes the task:
### Input:
Taking into account fragment type (circular|linear) and retrieved gene order information translates adjacencies between blocks into edges for addition to the :class:`bg.breakpoint_graph.BreakpointGraph`
In case supplied fragment is linear (``$``) special artificial vertices (with ``__infinity`` suffix) are introduced to denote fragment extremities
:param parsed_data: (``$`` | ``@``, [(``+`` | ``-``, block_name),...]) formatted data about fragment type and ordered list of oriented blocks
:type parsed_data: ``tuple(str, list((str, str), ...))``
:return: a list of vertices pairs that would correspond to edges in :class:`bg.breakpoint_graph.BreakpointGraph`
:rtype: ``list((str, str), ...)``
### Response:
def get_edges_from_parsed_data(parsed_data):
""" Taking into account fragment type (circular|linear) and retrieved gene order information translates adjacencies between blocks into edges for addition to the :class:`bg.breakpoint_graph.BreakpointGraph`
In case supplied fragment is linear (``$``) special artificial vertices (with ``__infinity`` suffix) are introduced to denote fragment extremities
:param parsed_data: (``$`` | ``@``, [(``+`` | ``-``, block_name),...]) formatted data about fragment type and ordered list of oriented blocks
:type parsed_data: ``tuple(str, list((str, str), ...))``
:return: a list of vertices pairs that would correspond to edges in :class:`bg.breakpoint_graph.BreakpointGraph`
:rtype: ``list((str, str), ...)``
"""
chr_type, blocks = parsed_data
vertices = []
for block in blocks:
###############################################################################################
#
# each block is represented as a pair of vertices (that correspond to block extremities)
#
###############################################################################################
v1, v2 = GRIMMReader.__assign_vertex_pair(block)
vertices.append(v1)
vertices.append(v2)
if chr_type == "@":
###############################################################################################
#
# if we parse a circular genomic fragment we must introduce an additional pair of vertices (edge)
# that would connect two outer most vertices in the vertex list, thus connecting fragment extremities
#
###############################################################################################
vertex = vertices.pop()
vertices.insert(0, vertex)
elif chr_type == "$":
###############################################################################################
#
# if we parse linear genomic fragment, we introduce two artificial (infinity) vertices
# that correspond to fragments ends, and introduce edges between them and respective outermost block vertices
#
# if outermost vertices at this moment are repeat vertices, the outermost pair shall be discarded and the innermost
# vertex info shall be utilized in the infinity vertex, that is introduced for the fragment extremity
#
###############################################################################################
if vertices[0].is_repeat_vertex:
left_iv_tags = sorted([(tag, value) if tag != "repeat" else (tag, BGVertex.get_vertex_name_root(vertices[1].name))
for tag, value in vertices[1].tags])
left_iv_root_name = BGVertex.get_vertex_name_root(vertices[2].name)
vertices = vertices[2:]
else:
left_iv_tags = []
left_iv_root_name = vertices[0].name
if vertices[-1].is_repeat_vertex:
right_iv_tags = sorted(
[(tag, value) if tag != "repeat" else (tag, BGVertex.get_vertex_name_root(vertices[-2].name))
for tag, value in vertices[-2].tags])
right_iv_root_name = BGVertex.get_vertex_name_root(vertices[-3].name)
vertices = vertices[:-2]
else:
right_iv_tags = []
right_iv_root_name = BGVertex.get_vertex_name_root(vertices[-1].name)
left_iv, right_iv = TaggedInfinityVertex(left_iv_root_name), TaggedInfinityVertex(right_iv_root_name)
left_iv.tags = left_iv_tags
right_iv.tags = right_iv_tags
vertices.insert(0, left_iv)
vertices.append(right_iv)
return [(v1, v2) for v1, v2 in zip(vertices[::2], vertices[1::2])] |
def _byteify(input):
"""
Force the given input to only use `str` instead of `bytes` or `unicode`.
This works even if the input is a dict, list,
"""
if isinstance(input, dict):
return {_byteify(key): _byteify(value) for key, value in input.items()}
elif isinstance(input, list):
return [_byteify(element) for element in input]
elif _PYTHON_3 and isinstance(input, str):
return str(input.encode('ascii', 'replace').decode('ascii'))
elif not _PYTHON_3 and isinstance(input, unicode):
return str(input.encode('ascii', 'replace').decode('ascii'))
else:
return input | Force the given input to only use `str` instead of `bytes` or `unicode`.
This works even if the input is a dict, list, | Below is the the instruction that describes the task:
### Input:
Force the given input to only use `str` instead of `bytes` or `unicode`.
This works even if the input is a dict, list,
### Response:
def _byteify(input):
"""
Force the given input to only use `str` instead of `bytes` or `unicode`.
This works even if the input is a dict, list,
"""
if isinstance(input, dict):
return {_byteify(key): _byteify(value) for key, value in input.items()}
elif isinstance(input, list):
return [_byteify(element) for element in input]
elif _PYTHON_3 and isinstance(input, str):
return str(input.encode('ascii', 'replace').decode('ascii'))
elif not _PYTHON_3 and isinstance(input, unicode):
return str(input.encode('ascii', 'replace').decode('ascii'))
else:
return input |
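A usage sketch; _PYTHON_3 is a module-level flag in the original code, so it is set explicitly here to keep the snippet runnable on Python 3:

_PYTHON_3 = True
decoded = {u'caf\u00e9': [u'r\u00e9sum\u00e9', 1]}
print(_byteify(decoded))    # {'caf?': ['r?sum?', 1]} -- non-ASCII characters become '?'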
def _add_dynamic_field_to_model(cls, field, field_name):
"""
Add a copy of the DynamicField "field" to the current class and its
subclasses using the "field_name" name
"""
# create the new field
new_field = field._create_dynamic_version()
new_field.name = field_name
new_field._attach_to_model(cls)
# set it as an attribute on the class, to be reachable
setattr(cls, "_redis_attr_%s" % field_name, new_field)
# NOTE: don't add the field to the "_fields" list, to avoid use extra
# memory to each future instance that will create a field for each
# dynamic one created
# # add the field to the list to avoid to done all of this again
# # (_fields is already on this class only, not subclasses)
# cls._fields.append(field_name)
# each subclass needs its own copy
for subclass in cls.__subclasses__():
subclass._add_dynamic_field_to_model(field, field_name)
return new_field | Add a copy of the DynamicField "field" to the current class and its
subclasses using the "field_name" name | Below is the the instruction that describes the task:
### Input:
Add a copy of the DynamicField "field" to the current class and its
subclasses using the "field_name" name
### Response:
def _add_dynamic_field_to_model(cls, field, field_name):
"""
Add a copy of the DynamicField "field" to the current class and its
subclasses using the "field_name" name
"""
# create the new field
new_field = field._create_dynamic_version()
new_field.name = field_name
new_field._attach_to_model(cls)
# set it as an attribute on the class, to be reachable
setattr(cls, "_redis_attr_%s" % field_name, new_field)
# NOTE: don't add the field to the "_fields" list, to avoid use extra
# memory to each future instance that will create a field for each
# dynamic one created
# # add the field to the list to avoid to done all of this again
# # (_fields is already on this class only, not subclasses)
# cls._fields.append(field_name)
# each subclass needs its own copy
for subclass in cls.__subclasses__():
subclass._add_dynamic_field_to_model(field, field_name)
return new_field |
def _get_pplan_with_watch(self, topologyName, callback, isWatching):
"""
Helper function to get pplan with
a callback. The future watch is placed
only if isWatching is True.
"""
path = self.get_pplan_path(topologyName)
if isWatching:
LOG.info("Adding data watch for path: " + path)
# pylint: disable=unused-variable, unused-argument
@self.client.DataWatch(path)
def watch_pplan(data, stats):
""" invoke callback to watch physical plan """
if data:
pplan = PhysicalPlan()
pplan.ParseFromString(data)
callback(pplan)
else:
callback(None)
# Returning False will result in no future watches
# being triggered. If isWatching is True, then
# the future watches will be triggered.
return isWatching | Helper function to get pplan with
a callback. The future watch is placed
only if isWatching is True. | Below is the the instruction that describes the task:
### Input:
Helper function to get pplan with
a callback. The future watch is placed
only if isWatching is True.
### Response:
def _get_pplan_with_watch(self, topologyName, callback, isWatching):
"""
Helper function to get pplan with
a callback. The future watch is placed
only if isWatching is True.
"""
path = self.get_pplan_path(topologyName)
if isWatching:
LOG.info("Adding data watch for path: " + path)
# pylint: disable=unused-variable, unused-argument
@self.client.DataWatch(path)
def watch_pplan(data, stats):
""" invoke callback to watch physical plan """
if data:
pplan = PhysicalPlan()
pplan.ParseFromString(data)
callback(pplan)
else:
callback(None)
# Returning False will result in no future watches
# being triggered. If isWatching is True, then
# the future watches will be triggered.
return isWatching |
async def listNamespaces(self, *args, **kwargs):
"""
List Namespaces
List the namespaces immediately under a given namespace.
This endpoint
lists up to 1000 namespaces. If more namespaces are present, a
`continuationToken` will be returned, which can be given in the next
request. For the initial request, the payload should be an empty JSON
object.
This method gives output: ``v1/list-namespaces-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listNamespaces"], *args, **kwargs) | List Namespaces
List the namespaces immediately under a given namespace.
This endpoint
lists up to 1000 namespaces. If more namespaces are present, a
`continuationToken` will be returned, which can be given in the next
request. For the initial request, the payload should be an empty JSON
object.
This method gives output: ``v1/list-namespaces-response.json#``
This method is ``stable`` | Below is the the instruction that describes the task:
### Input:
List Namespaces
List the namespaces immediately under a given namespace.
This endpoint
lists up to 1000 namespaces. If more namespaces are present, a
`continuationToken` will be returned, which can be given in the next
request. For the initial request, the payload should be an empty JSON
object.
This method gives output: ``v1/list-namespaces-response.json#``
This method is ``stable``
### Response:
async def listNamespaces(self, *args, **kwargs):
"""
List Namespaces
List the namespaces immediately under a given namespace.
This endpoint
lists up to 1000 namespaces. If more namespaces are present, a
`continuationToken` will be returned, which can be given in the next
request. For the initial request, the payload should be an empty JSON
object.
This method gives output: ``v1/list-namespaces-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listNamespaces"], *args, **kwargs) |
def safe_add_edge(graph, u, v, key, attr_dict, **attr):
"""Adds an edge while preserving negative keys, and paying no respect to positive ones
:param pybel.BELGraph graph: A BEL Graph
:param tuple u: The source BEL node
:param tuple v: The target BEL node
:param int key: The edge key. If less than zero, corresponds to an unqualified edge, else is disregarded
:param dict attr_dict: The edge data dictionary
:param dict attr: Edge data to assign via keyword arguments
"""
if key < 0:
graph.add_edge(u, v, key=key, attr_dict=attr_dict, **attr)
else:
graph.add_edge(u, v, attr_dict=attr_dict, **attr) | Adds an edge while preserving negative keys, and paying no respect to positive ones
:param pybel.BELGraph graph: A BEL Graph
:param tuple u: The source BEL node
:param tuple v: The target BEL node
:param int key: The edge key. If less than zero, corresponds to an unqualified edge, else is disregarded
:param dict attr_dict: The edge data dictionary
:param dict attr: Edge data to assign via keyword arguments | Below is the the instruction that describes the task:
### Input:
Adds an edge while preserving negative keys, and paying no respect to positive ones
:param pybel.BELGraph graph: A BEL Graph
:param tuple u: The source BEL node
:param tuple v: The target BEL node
:param int key: The edge key. If less than zero, corresponds to an unqualified edge, else is disregarded
:param dict attr_dict: The edge data dictionary
:param dict attr: Edge data to assign via keyword arguments
### Response:
def safe_add_edge(graph, u, v, key, attr_dict, **attr):
"""Adds an edge while preserving negative keys, and paying no respect to positive ones
:param pybel.BELGraph graph: A BEL Graph
:param tuple u: The source BEL node
:param tuple v: The target BEL node
:param int key: The edge key. If less than zero, corresponds to an unqualified edge, else is disregarded
:param dict attr_dict: The edge data dictionary
:param dict attr: Edge data to assign via keyword arguments
"""
if key < 0:
graph.add_edge(u, v, key=key, attr_dict=attr_dict, **attr)
else:
graph.add_edge(u, v, attr_dict=attr_dict, **attr) |
def create_casting_method(op, klass):
"""
Creates a new univariate special method, such as A.__float__() <=> float(A.value),
for target class. The method is called __op_name__.
"""
# This function will become the actual method.
def new_method(self, op=op):
if not check_special_methods():
raise NotImplementedError(
'Special method %s called on %s, but special methods have been disabled. Set pymc.special_methods_available to True to enable them.' %
                (op.__name__, str(self)))
return op(self.value)
# Make the function into a method for klass.
new_method.__name__ = '__' + op.__name__ + '__'
setattr(
klass,
new_method.__name__,
UnboundMethodType(
new_method,
None,
klass)) | Creates a new univariate special method, such as A.__float__() <=> float(A.value),
for target class. The method is called __op_name__. | Below is the the instruction that describes the task:
### Input:
Creates a new univariate special method, such as A.__float__() <=> float(A.value),
for target class. The method is called __op_name__.
### Response:
def create_casting_method(op, klass):
"""
Creates a new univariate special method, such as A.__float__() <=> float(A.value),
for target class. The method is called __op_name__.
"""
# This function will become the actual method.
def new_method(self, op=op):
if not check_special_methods():
raise NotImplementedError(
'Special method %s called on %s, but special methods have been disabled. Set pymc.special_methods_available to True to enable them.' %
                (op.__name__, str(self)))
return op(self.value)
# Make the function into a method for klass.
new_method.__name__ = '__' + op.__name__ + '__'
setattr(
klass,
new_method.__name__,
UnboundMethodType(
new_method,
None,
klass)) |
def getActiveJobCountForClientKey(self, clientKey):
""" Return the number of jobs for the given clientKey and a status that is
not completed.
"""
with ConnectionFactory.get() as conn:
query = 'SELECT count(job_id) ' \
'FROM %s ' \
'WHERE client_key = %%s ' \
' AND status != %%s' % self.jobsTableName
conn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED])
activeJobCount = conn.cursor.fetchone()[0]
return activeJobCount | Return the number of jobs for the given clientKey and a status that is
not completed. | Below is the the instruction that describes the task:
### Input:
Return the number of jobs for the given clientKey and a status that is
not completed.
### Response:
def getActiveJobCountForClientKey(self, clientKey):
""" Return the number of jobs for the given clientKey and a status that is
not completed.
"""
with ConnectionFactory.get() as conn:
query = 'SELECT count(job_id) ' \
'FROM %s ' \
'WHERE client_key = %%s ' \
' AND status != %%s' % self.jobsTableName
conn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED])
activeJobCount = conn.cursor.fetchone()[0]
return activeJobCount |
def detect_parser_type(cls, file, encoding=None):
'''Get the suitable parser type for the document.
Returns:
str
'''
is_xml = XMLDetector.is_file(file)
doctype = cls.parse_doctype(file, encoding=encoding) or ''
if not doctype and is_xml:
return 'xml'
if 'XHTML' in doctype:
return 'xhtml'
return 'html' | Get the suitable parser type for the document.
Returns:
str | Below is the the instruction that describes the task:
### Input:
Get the suitable parser type for the document.
Returns:
str
### Response:
def detect_parser_type(cls, file, encoding=None):
'''Get the suitable parser type for the document.
Returns:
str
'''
is_xml = XMLDetector.is_file(file)
doctype = cls.parse_doctype(file, encoding=encoding) or ''
if not doctype and is_xml:
return 'xml'
if 'XHTML' in doctype:
return 'xhtml'
return 'html' |
def _do_anchor(self, anchor):
""" Collects preposition anchors and attachments in a dictionary.
Once the dictionary has an entry for both the anchor and the attachment, they are linked.
"""
if anchor:
for x in anchor.split("-"):
A, P = None, None
if x.startswith("A") and len(self.chunks) > 0: # anchor
A, P = x, x.replace("A","P")
self._anchors[A] = self.chunks[-1]
if x.startswith("P") and len(self.pnp) > 0: # attachment (PNP)
A, P = x.replace("P","A"), x
self._anchors[P] = self.pnp[-1]
if A in self._anchors and P in self._anchors and not self._anchors[P].anchor:
pnp = self._anchors[P]
pnp.anchor = self._anchors[A]
pnp.anchor.attachments.append(pnp) | Collects preposition anchors and attachments in a dictionary.
Once the dictionary has an entry for both the anchor and the attachment, they are linked. | Below is the the instruction that describes the task:
### Input:
Collects preposition anchors and attachments in a dictionary.
Once the dictionary has an entry for both the anchor and the attachment, they are linked.
### Response:
def _do_anchor(self, anchor):
""" Collects preposition anchors and attachments in a dictionary.
Once the dictionary has an entry for both the anchor and the attachment, they are linked.
"""
if anchor:
for x in anchor.split("-"):
A, P = None, None
if x.startswith("A") and len(self.chunks) > 0: # anchor
A, P = x, x.replace("A","P")
self._anchors[A] = self.chunks[-1]
if x.startswith("P") and len(self.pnp) > 0: # attachment (PNP)
A, P = x.replace("P","A"), x
self._anchors[P] = self.pnp[-1]
if A in self._anchors and P in self._anchors and not self._anchors[P].anchor:
pnp = self._anchors[P]
pnp.anchor = self._anchors[A]
pnp.anchor.attachments.append(pnp) |
def setModified( self, state = True ):
"""
        Flags the scene as being modified based on the inputted value.
:param state <bool>
"""
if ( state == self._modified ):
return
self._modified = state
        self.emitModifiedStateChanged() | Flags the scene as being modified based on the inputted value.
:param state <bool> | Below is the the instruction that describes the task:
### Input:
Flags the scene as being modified based on the inputted value.
:param state <bool>
### Response:
def setModified( self, state = True ):
"""
        Flags the scene as being modified based on the inputted value.
:param state <bool>
"""
if ( state == self._modified ):
return
self._modified = state
self.emitModifiedStateChanged() |
def get_methods_class(self, class_name):
"""
Return all methods of a specific class
:param class_name: the class name
:type class_name: string
:rtype: a list with :class:`EncodedMethod` objects
"""
l = []
for i in self.get_classes():
for j in i.get_methods():
if class_name == j.get_class_name():
l.append(j)
return l | Return all methods of a specific class
:param class_name: the class name
:type class_name: string
:rtype: a list with :class:`EncodedMethod` objects | Below is the the instruction that describes the task:
### Input:
Return all methods of a specific class
:param class_name: the class name
:type class_name: string
:rtype: a list with :class:`EncodedMethod` objects
### Response:
def get_methods_class(self, class_name):
"""
Return all methods of a specific class
:param class_name: the class name
:type class_name: string
:rtype: a list with :class:`EncodedMethod` objects
"""
l = []
for i in self.get_classes():
for j in i.get_methods():
if class_name == j.get_class_name():
l.append(j)
return l |
def fail(self, msg, shutit_pexpect_child=None, throw_exception=False):
"""Handles a failure, pausing if a pexpect child object is passed in.
@param shutit_pexpect_child: pexpect child to work on
@param throw_exception: Whether to throw an exception.
@type throw_exception: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
# Note: we must not default to a child here
if shutit_pexpect_child is not None:
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
shutit_util.print_debug(sys.exc_info())
shutit_pexpect_session.pause_point('Pause point on fail: ' + msg, color='31')
if throw_exception:
sys.stderr.write('Error caught: ' + msg + '\n')
sys.stderr.write('\n')
shutit_util.print_debug(sys.exc_info())
raise ShutItFailException(msg)
else:
# This is an "OK" failure, ie we don't need to throw an exception.
# However, it's still a "failure", so return 1
shutit_global.shutit_global_object.handle_exit(exit_code=1,msg=msg)
shutit_global.shutit_global_object.yield_to_draw() | Handles a failure, pausing if a pexpect child object is passed in.
@param shutit_pexpect_child: pexpect child to work on
@param throw_exception: Whether to throw an exception.
@type throw_exception: boolean | Below is the the instruction that describes the task:
### Input:
Handles a failure, pausing if a pexpect child object is passed in.
@param shutit_pexpect_child: pexpect child to work on
@param throw_exception: Whether to throw an exception.
@type throw_exception: boolean
### Response:
def fail(self, msg, shutit_pexpect_child=None, throw_exception=False):
"""Handles a failure, pausing if a pexpect child object is passed in.
@param shutit_pexpect_child: pexpect child to work on
@param throw_exception: Whether to throw an exception.
@type throw_exception: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
# Note: we must not default to a child here
if shutit_pexpect_child is not None:
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
shutit_util.print_debug(sys.exc_info())
shutit_pexpect_session.pause_point('Pause point on fail: ' + msg, color='31')
if throw_exception:
sys.stderr.write('Error caught: ' + msg + '\n')
sys.stderr.write('\n')
shutit_util.print_debug(sys.exc_info())
raise ShutItFailException(msg)
else:
# This is an "OK" failure, ie we don't need to throw an exception.
# However, it's still a "failure", so return 1
shutit_global.shutit_global_object.handle_exit(exit_code=1,msg=msg)
shutit_global.shutit_global_object.yield_to_draw() |
def generate_schema_file(config_file):
"""
Generates a basic confirm schema file from a configuration file.
"""
config = utils.load_config_from_ini_file(config_file)
schema = {}
for section_name in config:
for option_name in config[section_name]:
schema.setdefault(section_name, {}).setdefault(option_name, {})
schema[section_name][option_name]['description'] = 'No description provided.'
return utils.dump_schema_file(schema) | Generates a basic confirm schema file from a configuration file. | Below is the the instruction that describes the task:
### Input:
Generates a basic confirm schema file from a configuration file.
### Response:
def generate_schema_file(config_file):
"""
Generates a basic confirm schema file from a configuration file.
"""
config = utils.load_config_from_ini_file(config_file)
schema = {}
for section_name in config:
for option_name in config[section_name]:
schema.setdefault(section_name, {}).setdefault(option_name, {})
schema[section_name][option_name]['description'] = 'No description provided.'
return utils.dump_schema_file(schema) |
def exists(self, primary_key):
'''
a method to determine if record exists
:param primary_key: string with primary key of record
:return: boolean to indicate existence of record
'''
select_statement = self.table.select(self.table).where(self.table.c.id==primary_key)
record_object = self.session.execute(select_statement).first()
if record_object:
return True
return False | a method to determine if record exists
:param primary_key: string with primary key of record
:return: boolean to indicate existence of record | Below is the the instruction that describes the task:
### Input:
a method to determine if record exists
:param primary_key: string with primary key of record
:return: boolean to indicate existence of record
### Response:
def exists(self, primary_key):
'''
a method to determine if record exists
:param primary_key: string with primary key of record
:return: boolean to indicate existence of record
'''
select_statement = self.table.select(self.table).where(self.table.c.id==primary_key)
record_object = self.session.execute(select_statement).first()
if record_object:
return True
return False |
def _temporary_file(self, delete):
""":return: a temporary file where the content is dumped to."""
file = NamedTemporaryFile("w+", delete=delete,
encoding=self.__encoding)
self._file(file)
return file | :return: a temporary file where the content is dumped to. | Below is the the instruction that describes the task:
### Input:
:return: a temporary file where the content is dumped to.
### Response:
def _temporary_file(self, delete):
""":return: a temporary file where the content is dumped to."""
file = NamedTemporaryFile("w+", delete=delete,
encoding=self.__encoding)
self._file(file)
return file |
def view(model: "Model", *functions: Callable) -> Optional[Callable]:
"""A decorator for registering a callback to a model
Parameters:
model: the model object whose changes the callback should respond to.
Examples:
.. code-block:: python
from spectate import mvc
items = mvc.List()
@mvc.view(items)
def printer(items, events):
for e in events:
print(e)
items.append(1)
"""
if not isinstance(model, Model):
raise TypeError("Expected a Model, not %r." % model)
def setup(function: Callable):
model._model_views.append(function)
return function
if functions:
for f in functions:
setup(f)
else:
return setup | A decorator for registering a callback to a model
Parameters:
model: the model object whose changes the callback should respond to.
Examples:
.. code-block:: python
from spectate import mvc
items = mvc.List()
@mvc.view(items)
def printer(items, events):
for e in events:
print(e)
items.append(1) | Below is the the instruction that describes the task:
### Input:
A decorator for registering a callback to a model
Parameters:
model: the model object whose changes the callback should respond to.
Examples:
.. code-block:: python
from spectate import mvc
items = mvc.List()
@mvc.view(items)
def printer(items, events):
for e in events:
print(e)
items.append(1)
### Response:
def view(model: "Model", *functions: Callable) -> Optional[Callable]:
"""A decorator for registering a callback to a model
Parameters:
model: the model object whose changes the callback should respond to.
Examples:
.. code-block:: python
from spectate import mvc
items = mvc.List()
@mvc.view(items)
def printer(items, events):
for e in events:
print(e)
items.append(1)
"""
if not isinstance(model, Model):
raise TypeError("Expected a Model, not %r." % model)
def setup(function: Callable):
model._model_views.append(function)
return function
if functions:
for f in functions:
setup(f)
else:
return setup |
def new(params, event_shape=(), validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'IndependentLogistic',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype_hint=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
loc_params, scale_params = tf.split(params, 2, axis=-1)
return tfd.Independent(
tfd.Logistic(
loc=tf.reshape(loc_params, output_shape),
scale=tf.math.softplus(tf.reshape(scale_params, output_shape)),
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args) | Create the distribution instance from a `params` vector. | Below is the the instruction that describes the task:
### Input:
Create the distribution instance from a `params` vector.
### Response:
def new(params, event_shape=(), validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'IndependentLogistic',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(
tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype_hint=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
loc_params, scale_params = tf.split(params, 2, axis=-1)
return tfd.Independent(
tfd.Logistic(
loc=tf.reshape(loc_params, output_shape),
scale=tf.math.softplus(tf.reshape(scale_params, output_shape)),
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args) |
def _extract_package(deps_dir, pkg_path):
"""
Extract a .whl, .zip, .tar.gz or .tar.bz2 into a package path to
use when running CI tasks
:param deps_dir:
A unicode string of the directory the package should be extracted to
:param pkg_path:
A unicode string of the path to the archive
"""
if pkg_path.endswith('.exe'):
try:
zf = None
zf = zipfile.ZipFile(pkg_path, 'r')
# Exes have a PLATLIB folder containing everything we want
for zi in zf.infolist():
if not zi.filename.startswith('PLATLIB'):
continue
data = _extract_info(zf, zi)
if data is not None:
dst_path = os.path.join(deps_dir, zi.filename[8:])
dst_dir = os.path.dirname(dst_path)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
with open(dst_path, 'wb') as f:
f.write(data)
finally:
if zf:
zf.close()
return
if pkg_path.endswith('.whl'):
try:
zf = None
zf = zipfile.ZipFile(pkg_path, 'r')
# Wheels contain exactly what we need and nothing else
zf.extractall(deps_dir)
finally:
if zf:
zf.close()
return
# Source archives may contain a bunch of other things.
# The following code works for the packages coverage and
# configparser, which are the two we currently require that
# do not provide wheels
try:
ar = None
ar = _open_archive(pkg_path)
pkg_name = None
base_path = _archive_single_dir(ar) or ''
if len(base_path):
if '-' in base_path:
pkg_name, _ = base_path.split('-', 1)
base_path += '/'
base_pkg_path = None
if pkg_name is not None:
base_pkg_path = base_path + pkg_name + '/'
src_path = base_path + 'src/'
members = []
for info in _list_archive_members(ar):
fn = _info_name(info)
if base_pkg_path is not None and fn.startswith(base_pkg_path):
dst_path = fn[len(base_pkg_path) - len(pkg_name) - 1:]
members.append((info, dst_path))
continue
if fn.startswith(src_path):
members.append((info, fn[len(src_path):]))
continue
for info, path in members:
info_data = _extract_info(ar, info)
# Dirs won't return a file
if info_data is not None:
dst_path = os.path.join(deps_dir, path)
dst_dir = os.path.dirname(dst_path)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
with open(dst_path, 'wb') as f:
f.write(info_data)
finally:
if ar:
ar.close() | Extract a .whl, .zip, .tar.gz or .tar.bz2 into a package path to
use when running CI tasks
:param deps_dir:
A unicode string of the directory the package should be extracted to
:param pkg_path:
A unicode string of the path to the archive | Below is the the instruction that describes the task:
### Input:
Extract a .whl, .zip, .tar.gz or .tar.bz2 into a package path to
use when running CI tasks
:param deps_dir:
A unicode string of the directory the package should be extracted to
:param pkg_path:
A unicode string of the path to the archive
### Response:
def _extract_package(deps_dir, pkg_path):
"""
Extract a .whl, .zip, .tar.gz or .tar.bz2 into a package path to
use when running CI tasks
:param deps_dir:
A unicode string of the directory the package should be extracted to
:param pkg_path:
A unicode string of the path to the archive
"""
if pkg_path.endswith('.exe'):
try:
zf = None
zf = zipfile.ZipFile(pkg_path, 'r')
# Exes have a PLATLIB folder containing everything we want
for zi in zf.infolist():
if not zi.filename.startswith('PLATLIB'):
continue
data = _extract_info(zf, zi)
if data is not None:
dst_path = os.path.join(deps_dir, zi.filename[8:])
dst_dir = os.path.dirname(dst_path)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
with open(dst_path, 'wb') as f:
f.write(data)
finally:
if zf:
zf.close()
return
if pkg_path.endswith('.whl'):
try:
zf = None
zf = zipfile.ZipFile(pkg_path, 'r')
# Wheels contain exactly what we need and nothing else
zf.extractall(deps_dir)
finally:
if zf:
zf.close()
return
# Source archives may contain a bunch of other things.
# The following code works for the packages coverage and
# configparser, which are the two we currently require that
# do not provide wheels
try:
ar = None
ar = _open_archive(pkg_path)
pkg_name = None
base_path = _archive_single_dir(ar) or ''
if len(base_path):
if '-' in base_path:
pkg_name, _ = base_path.split('-', 1)
base_path += '/'
base_pkg_path = None
if pkg_name is not None:
base_pkg_path = base_path + pkg_name + '/'
src_path = base_path + 'src/'
members = []
for info in _list_archive_members(ar):
fn = _info_name(info)
if base_pkg_path is not None and fn.startswith(base_pkg_path):
dst_path = fn[len(base_pkg_path) - len(pkg_name) - 1:]
members.append((info, dst_path))
continue
if fn.startswith(src_path):
members.append((info, fn[len(src_path):]))
continue
for info, path in members:
info_data = _extract_info(ar, info)
# Dirs won't return a file
if info_data is not None:
dst_path = os.path.join(deps_dir, path)
dst_dir = os.path.dirname(dst_path)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
with open(dst_path, 'wb') as f:
f.write(info_data)
finally:
if ar:
ar.close() |
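A minimal usage sketch for the extractor above; the deps directory and archive file names are illustrative assumptions, not paths from the original project, and the private helpers (_open_archive, _list_archive_members, _archive_single_dir, _extract_info, _info_name) are assumed to live in the same module.
import os

deps_dir = os.path.abspath('deps')
if not os.path.exists(deps_dir):
    os.makedirs(deps_dir)

# A wheel is extracted verbatim; a source archive is filtered down to its package dir.
_extract_package(deps_dir, os.path.join('downloads', 'somepackage-1.0-py3-none-any.whl'))
_extract_package(deps_dir, os.path.join('downloads', 'otherpackage-2.0.tar.gz'))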
def send_signal_to_gdb(self, signal_input):
"""Send signal name (case insensitive) or number to gdb subprocess
gdbmi.send_signal_to_gdb(2) # valid
gdbmi.send_signal_to_gdb('sigint') # also valid
gdbmi.send_signal_to_gdb('SIGINT') # also valid
        raises ValueError if signal_input is invalid
raises NoGdbProcessError if there is no gdb process to send a signal to
"""
try:
signal = int(signal_input)
except Exception:
signal = SIGNAL_NAME_TO_NUM.get(signal_input.upper())
if not signal:
raise ValueError(
                'Could not find signal corresponding to "%s"' % str(signal_input)
)
if self.gdb_process:
os.kill(self.gdb_process.pid, signal)
else:
raise NoGdbProcessError(
"Cannot send signal to gdb process because no process exists."
) | Send signal name (case insensitive) or number to gdb subprocess
gdbmi.send_signal_to_gdb(2) # valid
gdbmi.send_signal_to_gdb('sigint') # also valid
gdbmi.send_signal_to_gdb('SIGINT') # also valid
        raises ValueError if signal_input is invalid
raises NoGdbProcessError if there is no gdb process to send a signal to | Below is the the instruction that describes the task:
### Input:
Send signal name (case insensitive) or number to gdb subprocess
gdbmi.send_signal_to_gdb(2) # valid
gdbmi.send_signal_to_gdb('sigint') # also valid
gdbmi.send_signal_to_gdb('SIGINT') # also valid
        raises ValueError if signal_input is invalid
raises NoGdbProcessError if there is no gdb process to send a signal to
### Response:
def send_signal_to_gdb(self, signal_input):
"""Send signal name (case insensitive) or number to gdb subprocess
gdbmi.send_signal_to_gdb(2) # valid
gdbmi.send_signal_to_gdb('sigint') # also valid
gdbmi.send_signal_to_gdb('SIGINT') # also valid
        raises ValueError if signal_input is invalid
raises NoGdbProcessError if there is no gdb process to send a signal to
"""
try:
signal = int(signal_input)
except Exception:
signal = SIGNAL_NAME_TO_NUM.get(signal_input.upper())
if not signal:
raise ValueError(
                'Could not find signal corresponding to "%s"' % str(signal_input)
)
if self.gdb_process:
os.kill(self.gdb_process.pid, signal)
else:
raise NoGdbProcessError(
"Cannot send signal to gdb process because no process exists."
) |
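The name-or-number lookup above can be exercised on its own. This standalone sketch assumes SIGNAL_NAME_TO_NUM is built from the standard signal module, which is how pygdbmi-style controllers typically populate it; that construction is an assumption, not taken from the original source.
import signal as signal_module

SIGNAL_NAME_TO_NUM = {
    name: getattr(signal_module, name)
    for name in dir(signal_module)
    if name.startswith('SIG') and not name.startswith('SIG_')
}

def resolve_signal(signal_input):
    # Mirrors the int-first, name-second resolution used by send_signal_to_gdb.
    try:
        return int(signal_input)
    except (TypeError, ValueError):
        num = SIGNAL_NAME_TO_NUM.get(str(signal_input).upper())
        if not num:
            raise ValueError('Could not find signal corresponding to "%s"' % signal_input)
        return num

print(resolve_signal(2) == resolve_signal('sigint') == resolve_signal('SIGINT'))  # True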
def contents(self):
"""
Returns entire contents of course page
:returns: response object
Example Usage::
>>> import muddle
        >>> muddle.course(10).contents()
"""
params = self.request_params
params.update({'wsfunction': 'core_course_get_contents',
'courseid': self.course_id})
return requests.get(self.api_url, params=params, verify=False).json() | Returns entire contents of course page
:returns: response object
Example Usage::
>>> import muddle
        >>> muddle.course(10).contents() | Below is the the instruction that describes the task:
### Input:
Returns entire contents of course page
:returns: response object
Example Usage::
>>> import muddle
>>> muddle.course(10).contents()
### Response:
def contents(self):
"""
Returns entire contents of course page
:returns: response object
Example Usage::
>>> import muddle
        >>> muddle.course(10).contents()
"""
params = self.request_params
params.update({'wsfunction': 'core_course_get_contents',
'courseid': self.course_id})
return requests.get(self.api_url, params=params, verify=False).json() |
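For context, roughly the same call expressed as a bare Moodle REST request; the host URL and token are placeholders, only core_course_get_contents comes from the method above, and SSL verification is left on here (the method disables it with verify=False).
import requests

api_url = 'https://moodle.example.com/webservice/rest/server.php'   # placeholder host
params = {
    'wstoken': 'YOUR_TOKEN',                  # placeholder credential
    'moodlewsrestformat': 'json',
    'wsfunction': 'core_course_get_contents',
    'courseid': 10,
}
sections = requests.get(api_url, params=params).json()
for section in sections:
    print(section.get('name'), len(section.get('modules', [])))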
def listdir(self, path='/'):
"""Lists the contents of the specified path,
returning a 2-tuple of lists; the first item being
directories, the second item being files.
"""
container = self._get_bucket()
objects = self.driver.list_container_objects(container)
path = self._clean_name(path)
if not path.endswith('/'):
path = "%s/" % path
files = []
dirs = []
# TOFIX: better algorithm to filter correctly
# (and not depend on google-storage empty folder naming)
for o in objects:
if path == '/':
if o.name.count('/') == 0:
files.append(o.name)
elif o.name.count('/') == 1:
dir_name = o.name[:o.name.index('/')]
if dir_name not in dirs:
dirs.append(dir_name)
elif o.name.startswith(path):
if o.name.count('/') <= path.count('/'):
# TOFIX : special case for google storage with empty dir
if o.name.endswith('_$folder$'):
name = o.name[:-9]
name = name[len(path):]
dirs.append(name)
else:
name = o.name[len(path):]
files.append(name)
return (dirs, files) | Lists the contents of the specified path,
returning a 2-tuple of lists; the first item being
directories, the second item being files. | Below is the the instruction that describes the task:
### Input:
Lists the contents of the specified path,
returning a 2-tuple of lists; the first item being
directories, the second item being files.
### Response:
def listdir(self, path='/'):
"""Lists the contents of the specified path,
returning a 2-tuple of lists; the first item being
directories, the second item being files.
"""
container = self._get_bucket()
objects = self.driver.list_container_objects(container)
path = self._clean_name(path)
if not path.endswith('/'):
path = "%s/" % path
files = []
dirs = []
# TOFIX: better algorithm to filter correctly
# (and not depend on google-storage empty folder naming)
for o in objects:
if path == '/':
if o.name.count('/') == 0:
files.append(o.name)
elif o.name.count('/') == 1:
dir_name = o.name[:o.name.index('/')]
if dir_name not in dirs:
dirs.append(dir_name)
elif o.name.startswith(path):
if o.name.count('/') <= path.count('/'):
# TOFIX : special case for google storage with empty dir
if o.name.endswith('_$folder$'):
name = o.name[:-9]
name = name[len(path):]
dirs.append(name)
else:
name = o.name[len(path):]
files.append(name)
return (dirs, files) |
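The prefix-splitting idea above, restated as a small standalone helper over plain object keys; this is a simplified sketch that ignores the Google Storage '_$folder$' special case.
def split_listing(keys, path='/'):
    # Split flat object keys into (top-level dirs, files) under the given prefix.
    path = '' if path == '/' else path.rstrip('/') + '/'
    dirs, files = [], []
    for key in keys:
        if not key.startswith(path):
            continue
        rest = key[len(path):]
        if '/' in rest:
            top = rest.split('/', 1)[0]
            if top not in dirs:
                dirs.append(top)
        elif rest:
            files.append(rest)
    return dirs, files

print(split_listing(['a.txt', 'img/x.png', 'img/y.png', 'docs/readme.md'], '/'))
# -> (['img', 'docs'], ['a.txt'])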
def prepare_next_tree(self):
"""Prepare conditional U matrix for next tree."""
for edge in self.edges:
copula_theta = edge.theta
if self.level == 1:
left_u = self.u_matrix[:, edge.L]
right_u = self.u_matrix[:, edge.R]
else:
left_parent, right_parent = edge.parents
left_u, right_u = Edge.get_conditional_uni(left_parent, right_parent)
# compute conditional cdfs C(i|j) = dC(i,j)/duj and dC(i,j)/du
left_u = [x for x in left_u if x is not None]
right_u = [x for x in right_u if x is not None]
X_left_right = np.array([[x, y] for x, y in zip(left_u, right_u)])
X_right_left = np.array([[x, y] for x, y in zip(right_u, left_u)])
copula = Bivariate(edge.name)
copula.theta = copula_theta
left_given_right = copula.partial_derivative(X_left_right)
right_given_left = copula.partial_derivative(X_right_left)
# correction of 0 or 1
left_given_right[left_given_right == 0] = EPSILON
right_given_left[right_given_left == 0] = EPSILON
left_given_right[left_given_right == 1] = 1 - EPSILON
right_given_left[right_given_left == 1] = 1 - EPSILON
edge.U = np.array([left_given_right, right_given_left]) | Prepare conditional U matrix for next tree. | Below is the the instruction that describes the task:
### Input:
Prepare conditional U matrix for next tree.
### Response:
def prepare_next_tree(self):
"""Prepare conditional U matrix for next tree."""
for edge in self.edges:
copula_theta = edge.theta
if self.level == 1:
left_u = self.u_matrix[:, edge.L]
right_u = self.u_matrix[:, edge.R]
else:
left_parent, right_parent = edge.parents
left_u, right_u = Edge.get_conditional_uni(left_parent, right_parent)
# compute conditional cdfs C(i|j) = dC(i,j)/duj and dC(i,j)/du
left_u = [x for x in left_u if x is not None]
right_u = [x for x in right_u if x is not None]
X_left_right = np.array([[x, y] for x, y in zip(left_u, right_u)])
X_right_left = np.array([[x, y] for x, y in zip(right_u, left_u)])
copula = Bivariate(edge.name)
copula.theta = copula_theta
left_given_right = copula.partial_derivative(X_left_right)
right_given_left = copula.partial_derivative(X_right_left)
# correction of 0 or 1
left_given_right[left_given_right == 0] = EPSILON
right_given_left[right_given_left == 0] = EPSILON
left_given_right[left_given_right == 1] = 1 - EPSILON
right_given_left[right_given_left == 1] = 1 - EPSILON
edge.U = np.array([left_given_right, right_given_left]) |
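The 0/1 correction at the end is just a nudge away from the unit-interval boundary so later log/ppf calls stay finite; a tiny numpy illustration (the EPSILON value used here is an assumption, not necessarily the library's constant).
import numpy as np

EPSILON = np.finfo(np.float32).eps
u = np.array([0.0, 0.25, 1.0])
u[u == 0] = EPSILON          # pull exact zeros just inside (0, 1)
u[u == 1] = 1 - EPSILON      # pull exact ones just inside (0, 1)
print(u)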
def _fixed_source_line(self):
"""Attempt to find the line that this node appears on.
We need this method since not all nodes have :attr:`lineno` set.
:returns: The line number of this node,
or None if this could not be determined.
:rtype: int or None
"""
line = self.lineno
_node = self
try:
while line is None:
_node = next(_node.get_children())
line = _node.lineno
except StopIteration:
_node = self.parent
while _node and line is None:
line = _node.lineno
_node = _node.parent
return line | Attempt to find the line that this node appears on.
We need this method since not all nodes have :attr:`lineno` set.
:returns: The line number of this node,
or None if this could not be determined.
:rtype: int or None | Below is the the instruction that describes the task:
### Input:
Attempt to find the line that this node appears on.
We need this method since not all nodes have :attr:`lineno` set.
:returns: The line number of this node,
or None if this could not be determined.
:rtype: int or None
### Response:
def _fixed_source_line(self):
"""Attempt to find the line that this node appears on.
We need this method since not all nodes have :attr:`lineno` set.
:returns: The line number of this node,
or None if this could not be determined.
:rtype: int or None
"""
line = self.lineno
_node = self
try:
while line is None:
_node = next(_node.get_children())
line = _node.lineno
except StopIteration:
_node = self.parent
while _node and line is None:
line = _node.lineno
_node = _node.parent
return line |
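The same "borrow a line number from the first child that has one" idea, shown with the stdlib ast module; this is a simplified sketch that omits the parent-walking fallback, since ast nodes carry no parent pointers.
import ast

def first_lineno(node):
    # Walk down into children until some node carries a lineno.
    line = getattr(node, 'lineno', None)
    try:
        while line is None:
            node = next(ast.iter_child_nodes(node))
            line = getattr(node, 'lineno', None)
    except StopIteration:
        pass
    return line

tree = ast.parse("\n\nx = 1\n")
print(first_lineno(tree))   # 3 -- the module node itself has no lineno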
def read(self,filename,datatype=None,slaext=False,**kwargs):
'''
reader method.
:parameter filename: name of the file to load.
:keyword datatype: choose between DT/NRT/PISTACH/CTOH or other formats to call the corresponding reader. If datatype is :
* DT or NRT or PISTACH : calls :func:`altimetry.data.alti_data.read_sla` or :func:`altimetry.data.alti_data.read_slaext`
* CTOH : calls :func:`altimetry.data.alti_data.read_CTOH`
* else : calls :func:`altimetry.data.alti_data.read_nc`, based on :class:`altimetry.tools.nctools.nc` object.
:keyword slaext: force using :func:`altimetry.data.alti_data.read_slaext`
    .. note:: This method is called from :meth:`altimetry.data.hydro_data.__init__` and returns a data structure to be handled by :meth:`altimetry.data.hydro_data.update_dataset`
'''
fname,extension = os.path.splitext(filename)
if os.path.basename(filename).count('.') > os.path.basename(filename).count('_'): delim='.'
else : delim = '_'
#Get data type
if datatype is None :
if os.path.basename(filename).split(delim)[0] == 'ctoh' : datatype='CTOH'
if os.path.basename(filename).split(delim)[0] == 'PISTACH' : datatype='PISTACH'
if os.path.basename(filename).split(delim)[0] == 'nrt' : datatype='NRT'
if os.path.basename(filename).split(delim)[0] == 'dt' : datatype='DT'
# else :
# datatype='RAW' #Setup default as raw NetCDF file
self.datatype=datatype
if (datatype == 'DT') | (datatype == 'NRT') | (datatype == 'PISTACH') :
if slaext : outStr=self.read_slaext(filename,datatype=datatype,**kwargs)
else : outStr=self.read_sla(filename,datatype=datatype,**kwargs)
if outStr.has_key('_dimensions'): self.update_fid_list(os.path.basename(filename),outStr['_dimensions']['time'])
elif (datatype == 'CTOH') :
outStr=self.read_CTOH(filename,**kwargs)
if outStr.has_key('_dimensions'): self.update_fid_list(os.path.basename(filename),outStr['_dimensions']['time'])
else: #Setup default as raw NetCDF file
outStr=self.read_nc(filename,**kwargs)
if outStr.has_key('_dimensions'): self.update_fid_list(os.path.basename(filename),outStr['_dimensions'][outStr['_dimensions'].keys()[1]])
return outStr | reader method.
:parameter filename: name of the file to load.
:keyword datatype: choose between DT/NRT/PISTACH/CTOH or other formats to call the corresponding reader. If datatype is :
* DT or NRT or PISTACH : calls :func:`altimetry.data.alti_data.read_sla` or :func:`altimetry.data.alti_data.read_slaext`
* CTOH : calls :func:`altimetry.data.alti_data.read_CTOH`
* else : calls :func:`altimetry.data.alti_data.read_nc`, based on :class:`altimetry.tools.nctools.nc` object.
:keyword slaext: force using :func:`altimetry.data.alti_data.read_slaext`
    .. note:: This method is called from :meth:`altimetry.data.hydro_data.__init__` and returns a data structure to be handled by :meth:`altimetry.data.hydro_data.update_dataset` | Below is the the instruction that describes the task:
### Input:
reader method.
:parameter filename: name of the file to load.
:keyword datatype: choose between DT/NRT/PISTACH/CTOH or other formats to call the corresponding reader. If datatype is :
* DT or NRT or PISTACH : calls :func:`altimetry.data.alti_data.read_sla` or :func:`altimetry.data.alti_data.read_slaext`
* CTOH : calls :func:`altimetry.data.alti_data.read_CTOH`
* else : calls :func:`altimetry.data.alti_data.read_nc`, based on :class:`altimetry.tools.nctools.nc` object.
:keyword slaext: force using :func:`altimetry.data.alti_data.read_slaext`
    .. note:: This method is called from :meth:`altimetry.data.hydro_data.__init__` and returns a data structure to be handled by :meth:`altimetry.data.hydro_data.update_dataset`
### Response:
def read(self,filename,datatype=None,slaext=False,**kwargs):
'''
reader method.
:parameter filename: name of the file to load.
:keyword datatype: choose between DT/NRT/PISTACH/CTOH or other formats to call the corresponding reader. If datatype is :
* DT or NRT or PISTACH : calls :func:`altimetry.data.alti_data.read_sla` or :func:`altimetry.data.alti_data.read_slaext`
* CTOH : calls :func:`altimetry.data.alti_data.read_CTOH`
* else : calls :func:`altimetry.data.alti_data.read_nc`, based on :class:`altimetry.tools.nctools.nc` object.
:keyword slaext: force using :func:`altimetry.data.alti_data.read_slaext`
    .. note:: This method is called from :meth:`altimetry.data.hydro_data.__init__` and returns a data structure to be handled by :meth:`altimetry.data.hydro_data.update_dataset`
'''
fname,extension = os.path.splitext(filename)
if os.path.basename(filename).count('.') > os.path.basename(filename).count('_'): delim='.'
else : delim = '_'
#Get data type
if datatype is None :
if os.path.basename(filename).split(delim)[0] == 'ctoh' : datatype='CTOH'
if os.path.basename(filename).split(delim)[0] == 'PISTACH' : datatype='PISTACH'
if os.path.basename(filename).split(delim)[0] == 'nrt' : datatype='NRT'
if os.path.basename(filename).split(delim)[0] == 'dt' : datatype='DT'
# else :
# datatype='RAW' #Setup default as raw NetCDF file
self.datatype=datatype
if (datatype == 'DT') | (datatype == 'NRT') | (datatype == 'PISTACH') :
if slaext : outStr=self.read_slaext(filename,datatype=datatype,**kwargs)
else : outStr=self.read_sla(filename,datatype=datatype,**kwargs)
if outStr.has_key('_dimensions'): self.update_fid_list(os.path.basename(filename),outStr['_dimensions']['time'])
elif (datatype == 'CTOH') :
outStr=self.read_CTOH(filename,**kwargs)
if outStr.has_key('_dimensions'): self.update_fid_list(os.path.basename(filename),outStr['_dimensions']['time'])
else: #Setup default as raw NetCDF file
outStr=self.read_nc(filename,**kwargs)
if outStr.has_key('_dimensions'): self.update_fid_list(os.path.basename(filename),outStr['_dimensions'][outStr['_dimensions'].keys()[1]])
return outStr |
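The delimiter/prefix sniffing at the top of the reader, pulled out as a standalone helper so the dispatch rule is easy to test; the file names below are illustrative, not taken from the original source.
import os

def sniff_datatype(filename):
    base = os.path.basename(filename)
    # More dots than underscores means the file uses '.' as its field separator.
    delim = '.' if base.count('.') > base.count('_') else '_'
    prefix = base.split(delim)[0]
    return {'ctoh': 'CTOH', 'PISTACH': 'PISTACH', 'nrt': 'NRT', 'dt': 'DT'}.get(prefix)

print(sniff_datatype('dt_global_j2_sla_vxxc_20120101_20120101_20120704.nc'))  # DT
print(sniff_datatype('ctoh.sla.ref.TP+J1+J2.nindian.088.nc'))                 # CTOH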
def listify(values, N=1, delim=None):
"""Return an N-length list, with elements values, extrapolating as necessary.
>>> listify("don't split into characters")
["don't split into characters"]
>>> listify("len = 3", 3)
['len = 3', 'len = 3', 'len = 3']
>>> listify("But split on a delimeter, if requested.", delim=',')
['But split on a delimeter', ' if requested.']
>>> listify(["obj 1", "obj 2", "len = 4"], N=4)
['obj 1', 'obj 2', 'len = 4', 'len = 4']
>>> listify(iter("len=7"), N=7)
['l', 'e', 'n', '=', '7', '7', '7']
>>> listify(iter("len=5"))
['l', 'e', 'n', '=', '5']
>>> listify(None, 3)
[[], [], []]
>>> listify([None],3)
[None, None, None]
>>> listify([], 3)
[[], [], []]
>>> listify('', 2)
['', '']
>>> listify(0)
[0]
>>> listify(False, 2)
[False, False]
"""
ans = [] if values is None else values
# convert non-string non-list iterables into a list
if hasattr(ans, '__iter__') and not isinstance(ans, basestring):
ans = list(ans)
else:
# split the string (if possible)
if isinstance(delim, basestring) and isinstance(ans, basestring):
try:
ans = ans.split(delim)
except (IndexError, ValueError, AttributeError, TypeError):
ans = [ans]
else:
ans = [ans]
# pad the end of the list if a length has been specified
if len(ans):
if len(ans) < N and N > 1:
ans += [ans[-1]] * (N - len(ans))
else:
if N > 1:
ans = [[]] * N
return ans | Return an N-length list, with elements values, extrapolating as necessary.
>>> listify("don't split into characters")
["don't split into characters"]
>>> listify("len = 3", 3)
['len = 3', 'len = 3', 'len = 3']
>>> listify("But split on a delimeter, if requested.", delim=',')
['But split on a delimeter', ' if requested.']
>>> listify(["obj 1", "obj 2", "len = 4"], N=4)
['obj 1', 'obj 2', 'len = 4', 'len = 4']
>>> listify(iter("len=7"), N=7)
['l', 'e', 'n', '=', '7', '7', '7']
>>> listify(iter("len=5"))
['l', 'e', 'n', '=', '5']
>>> listify(None, 3)
[[], [], []]
>>> listify([None],3)
[None, None, None]
>>> listify([], 3)
[[], [], []]
>>> listify('', 2)
['', '']
>>> listify(0)
[0]
>>> listify(False, 2)
[False, False] | Below is the the instruction that describes the task:
### Input:
Return an N-length list, with elements values, extrapolating as necessary.
>>> listify("don't split into characters")
["don't split into characters"]
>>> listify("len = 3", 3)
['len = 3', 'len = 3', 'len = 3']
>>> listify("But split on a delimeter, if requested.", delim=',')
['But split on a delimeter', ' if requested.']
>>> listify(["obj 1", "obj 2", "len = 4"], N=4)
['obj 1', 'obj 2', 'len = 4', 'len = 4']
>>> listify(iter("len=7"), N=7)
['l', 'e', 'n', '=', '7', '7', '7']
>>> listify(iter("len=5"))
['l', 'e', 'n', '=', '5']
>>> listify(None, 3)
[[], [], []]
>>> listify([None],3)
[None, None, None]
>>> listify([], 3)
[[], [], []]
>>> listify('', 2)
['', '']
>>> listify(0)
[0]
>>> listify(False, 2)
[False, False]
### Response:
def listify(values, N=1, delim=None):
"""Return an N-length list, with elements values, extrapolating as necessary.
>>> listify("don't split into characters")
["don't split into characters"]
>>> listify("len = 3", 3)
['len = 3', 'len = 3', 'len = 3']
>>> listify("But split on a delimeter, if requested.", delim=',')
['But split on a delimeter', ' if requested.']
>>> listify(["obj 1", "obj 2", "len = 4"], N=4)
['obj 1', 'obj 2', 'len = 4', 'len = 4']
>>> listify(iter("len=7"), N=7)
['l', 'e', 'n', '=', '7', '7', '7']
>>> listify(iter("len=5"))
['l', 'e', 'n', '=', '5']
>>> listify(None, 3)
[[], [], []]
>>> listify([None],3)
[None, None, None]
>>> listify([], 3)
[[], [], []]
>>> listify('', 2)
['', '']
>>> listify(0)
[0]
>>> listify(False, 2)
[False, False]
"""
ans = [] if values is None else values
# convert non-string non-list iterables into a list
if hasattr(ans, '__iter__') and not isinstance(ans, basestring):
ans = list(ans)
else:
# split the string (if possible)
if isinstance(delim, basestring) and isinstance(ans, basestring):
try:
ans = ans.split(delim)
except (IndexError, ValueError, AttributeError, TypeError):
ans = [ans]
else:
ans = [ans]
# pad the end of the list if a length has been specified
if len(ans):
if len(ans) < N and N > 1:
ans += [ans[-1]] * (N - len(ans))
else:
if N > 1:
ans = [[]] * N
return ans |
def remote_getWorkerInfo(self):
"""This command retrieves data from the files in WORKERDIR/info/* and
sends the contents to the buildmaster. These are used to describe
the worker and its configuration, and should be created and
maintained by the worker administrator. They will be retrieved each
time the master-worker connection is established.
"""
files = {}
basedir = os.path.join(self.basedir, "info")
if os.path.isdir(basedir):
for f in os.listdir(basedir):
filename = os.path.join(basedir, f)
if os.path.isfile(filename):
with open(filename, "r") as fin:
files[f] = fin.read()
if not self.numcpus:
try:
self.numcpus = multiprocessing.cpu_count()
except NotImplementedError:
log.msg("warning: could not detect the number of CPUs for "
"this worker. Assuming 1 CPU.")
self.numcpus = 1
files['environ'] = os.environ.copy()
files['system'] = os.name
files['basedir'] = self.basedir
files['numcpus'] = self.numcpus
files['version'] = self.remote_getVersion()
files['worker_commands'] = self.remote_getCommands()
return files | This command retrieves data from the files in WORKERDIR/info/* and
sends the contents to the buildmaster. These are used to describe
the worker and its configuration, and should be created and
maintained by the worker administrator. They will be retrieved each
time the master-worker connection is established. | Below is the the instruction that describes the task:
### Input:
This command retrieves data from the files in WORKERDIR/info/* and
sends the contents to the buildmaster. These are used to describe
the worker and its configuration, and should be created and
maintained by the worker administrator. They will be retrieved each
time the master-worker connection is established.
### Response:
def remote_getWorkerInfo(self):
"""This command retrieves data from the files in WORKERDIR/info/* and
sends the contents to the buildmaster. These are used to describe
the worker and its configuration, and should be created and
maintained by the worker administrator. They will be retrieved each
time the master-worker connection is established.
"""
files = {}
basedir = os.path.join(self.basedir, "info")
if os.path.isdir(basedir):
for f in os.listdir(basedir):
filename = os.path.join(basedir, f)
if os.path.isfile(filename):
with open(filename, "r") as fin:
files[f] = fin.read()
if not self.numcpus:
try:
self.numcpus = multiprocessing.cpu_count()
except NotImplementedError:
log.msg("warning: could not detect the number of CPUs for "
"this worker. Assuming 1 CPU.")
self.numcpus = 1
files['environ'] = os.environ.copy()
files['system'] = os.name
files['basedir'] = self.basedir
files['numcpus'] = self.numcpus
files['version'] = self.remote_getVersion()
files['worker_commands'] = self.remote_getCommands()
return files |
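The info/ files the method reads are plain text created by the worker administrator; a quick sketch of populating such a directory. The temporary basedir and the 'admin'/'host' file names follow the usual Buildbot convention but are illustrative here.
import os
import tempfile

basedir = tempfile.mkdtemp()                      # stand-in for the worker's basedir
info_dir = os.path.join(basedir, 'info')
os.makedirs(info_dir)
with open(os.path.join(info_dir, 'admin'), 'w') as f:
    f.write('Jane Admin <[email protected]>\n')
with open(os.path.join(info_dir, 'host'), 'w') as f:
    f.write('build box in rack 3\n')
# For a worker whose basedir points here, the method above would now return these
# two entries alongside the synthetic keys (environ, system, numcpus, ...).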
def save_data(self, idx):
"""Save the internal data of all sequences with an activated flag.
Write to file if the corresponding disk flag is activated; store
in working memory if the corresponding ram flag is activated."""
for name in self:
actual = getattr(self, name)
diskflag = getattr(self, '_%s_diskflag' % name)
ramflag = getattr(self, '_%s_ramflag' % name)
if diskflag:
file_ = getattr(self, '_%s_file' % name)
ndim = getattr(self, '_%s_ndim' % name)
length_tot = 1
for jdx in range(ndim):
length = getattr(self, '_%s_length_%s' % (name, jdx))
length_tot *= length
if ndim:
raw = struct.pack(length_tot*'d', *actual.flatten())
else:
raw = struct.pack('d', actual)
file_.write(raw)
elif ramflag:
array = getattr(self, '_%s_array' % name)
array[idx] = actual | Save the internal data of all sequences with an activated flag.
Write to file if the corresponding disk flag is activated; store
in working memory if the corresponding ram flag is activated. | Below is the the instruction that describes the task:
### Input:
Save the internal data of all sequences with an activated flag.
Write to file if the corresponding disk flag is activated; store
in working memory if the corresponding ram flag is activated.
### Response:
def save_data(self, idx):
"""Save the internal data of all sequences with an activated flag.
Write to file if the corresponding disk flag is activated; store
in working memory if the corresponding ram flag is activated."""
for name in self:
actual = getattr(self, name)
diskflag = getattr(self, '_%s_diskflag' % name)
ramflag = getattr(self, '_%s_ramflag' % name)
if diskflag:
file_ = getattr(self, '_%s_file' % name)
ndim = getattr(self, '_%s_ndim' % name)
length_tot = 1
for jdx in range(ndim):
length = getattr(self, '_%s_length_%s' % (name, jdx))
length_tot *= length
if ndim:
raw = struct.pack(length_tot*'d', *actual.flatten())
else:
raw = struct.pack('d', actual)
file_.write(raw)
elif ramflag:
array = getattr(self, '_%s_array' % name)
array[idx] = actual |
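The struct-based serialisation in isolation: an n-dimensional value is flattened and packed as a run of C doubles, which is all the disk branch above writes (a minimal numpy/struct sketch).
import struct
import numpy as np

values = np.arange(6, dtype=float).reshape(2, 3)
raw = struct.pack(values.size * 'd', *values.flatten())
print(len(raw))                          # 48 bytes: 6 doubles of 8 bytes each
print(struct.unpack(6 * 'd', raw)[:3])   # (0.0, 1.0, 2.0)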
def output_hist(self, output_hist: Hist, input_observable: Any, **kwargs: Dict[str, Any]) -> Union[Hist, Any]:
""" Return an output object. It should store the ``output_hist``.
Note:
The output object could just be the raw histogram.
Note:
This function is just a basic placeholder which returns the given output object (a histogram)
and likely should be overridden.
Args:
output_hist: The output histogram
input_observable (object): The corresponding input object. It could be a histogram or something
more complex.
kwargs: Projection information dict combined with additional arguments passed to the
projection function
Return:
The output object which should be stored in the output dict. By default, it returns the
output hist.
"""
return output_hist | Return an output object. It should store the ``output_hist``.
Note:
The output object could just be the raw histogram.
Note:
This function is just a basic placeholder which returns the given output object (a histogram)
and likely should be overridden.
Args:
output_hist: The output histogram
input_observable (object): The corresponding input object. It could be a histogram or something
more complex.
kwargs: Projection information dict combined with additional arguments passed to the
projection function
Return:
The output object which should be stored in the output dict. By default, it returns the
output hist. | Below is the the instruction that describes the task:
### Input:
Return an output object. It should store the ``output_hist``.
Note:
The output object could just be the raw histogram.
Note:
This function is just a basic placeholder which returns the given output object (a histogram)
and likely should be overridden.
Args:
output_hist: The output histogram
input_observable (object): The corresponding input object. It could be a histogram or something
more complex.
kwargs: Projection information dict combined with additional arguments passed to the
projection function
Return:
The output object which should be stored in the output dict. By default, it returns the
output hist.
### Response:
def output_hist(self, output_hist: Hist, input_observable: Any, **kwargs: Dict[str, Any]) -> Union[Hist, Any]:
""" Return an output object. It should store the ``output_hist``.
Note:
The output object could just be the raw histogram.
Note:
This function is just a basic placeholder which returns the given output object (a histogram)
and likely should be overridden.
Args:
output_hist: The output histogram
input_observable (object): The corresponding input object. It could be a histogram or something
more complex.
kwargs: Projection information dict combined with additional arguments passed to the
projection function
Return:
The output object which should be stored in the output dict. By default, it returns the
output hist.
"""
return output_hist |
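A typical override wraps the histogram together with its source instead of returning it bare; the wrapper class and function below are an illustrative assumption, not part of the documented API.
class ProjectionResult:
    """Pairs the projected histogram with the object it came from."""
    def __init__(self, hist, source):
        self.hist = hist
        self.source = source

def output_hist_override(output_hist, input_observable, **kwargs):
    # Same hook shape as above, but returning a richer object than the bare hist.
    return ProjectionResult(output_hist, input_observable)

res = output_hist_override('projected-hist-placeholder', {'name': 'raw-input'})
print(res.hist, res.source)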
def cli(ctx, project_dir):
"""Verify the verilog code."""
exit_code = SCons(project_dir).verify()
ctx.exit(exit_code) | Verify the verilog code. | Below is the the instruction that describes the task:
### Input:
Verify the verilog code.
### Response:
def cli(ctx, project_dir):
"""Verify the verilog code."""
exit_code = SCons(project_dir).verify()
ctx.exit(exit_code) |
def override_config(self, path):
"""
Will take a yml located in home directory titled '.plugin_config.yml'.
It'll then override, using the yml, the plugin's config file
"""
status = (True, None)
config_override = False
try:
# parse the yml file
c_dict = {}
if exists(self.plugin_config_file):
with open(self.plugin_config_file, 'r') as config_file:
c_dict = yaml.safe_load(config_file.read())
# check for environment variable overrides
check_c_dict = c_dict.copy()
for tool in check_c_dict:
for section in check_c_dict[tool]:
for key in check_c_dict[tool][section]:
if key in environ:
c_dict[tool][section][key] = getenv(key)
# assume the name of the plugin is its directory
plugin_name = path.split('/')[-1]
if plugin_name == '':
plugin_name = path.split('/')[-2]
plugin_config_path = path + '/config/' + plugin_name + '.config'
if exists(plugin_config_path):
plugin_template = Template(plugin_config_path)
plugin_options = c_dict[plugin_name]
for section in plugin_options:
for option in plugin_options[section]:
plugin_template.set_option(section, option,
str(plugin_options[section][option]))
plugin_template.write_config()
config_override = True
except Exception as e: # pragma: no cover
status = (False, str(e))
return status, config_override | Will take a yml located in home directory titled '.plugin_config.yml'.
It'll then override, using the yml, the plugin's config file | Below is the the instruction that describes the task:
### Input:
Will take a yml located in home directory titled '.plugin_config.yml'.
It'll then override, using the yml, the plugin's config file
### Response:
def override_config(self, path):
"""
Will take a yml located in home directory titled '.plugin_config.yml'.
It'll then override, using the yml, the plugin's config file
"""
status = (True, None)
config_override = False
try:
# parse the yml file
c_dict = {}
if exists(self.plugin_config_file):
with open(self.plugin_config_file, 'r') as config_file:
c_dict = yaml.safe_load(config_file.read())
# check for environment variable overrides
check_c_dict = c_dict.copy()
for tool in check_c_dict:
for section in check_c_dict[tool]:
for key in check_c_dict[tool][section]:
if key in environ:
c_dict[tool][section][key] = getenv(key)
# assume the name of the plugin is its directory
plugin_name = path.split('/')[-1]
if plugin_name == '':
plugin_name = path.split('/')[-2]
plugin_config_path = path + '/config/' + plugin_name + '.config'
if exists(plugin_config_path):
plugin_template = Template(plugin_config_path)
plugin_options = c_dict[plugin_name]
for section in plugin_options:
for option in plugin_options[section]:
plugin_template.set_option(section, option,
str(plugin_options[section][option]))
plugin_template.write_config()
config_override = True
except Exception as e: # pragma: no cover
status = (False, str(e))
return status, config_override |
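The tool -> section -> key override walk on its own, with an inline YAML document standing in for the .plugin_config.yml file; the plugin and option names are made up for illustration.
import os
import yaml   # PyYAML, as used by the method above

overrides = yaml.safe_load("""
myplugin:
  settings:
    timeout: 30
    api_key: default
""")
for tool in overrides:
    for section in overrides[tool]:
        for key in overrides[tool][section]:
            if key in os.environ:                  # an environment variable wins
                overrides[tool][section][key] = os.getenv(key)
print(overrides['myplugin']['settings'])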
def get_target_error(self,i):
"""Just get a single error characterization based on the index relative to the target
:param i: list index
:type i: int
:returns: base-wise error
:rtype: HPA group description
"""
x = self._target_hpas[i]
h = x['hpa']
pos = x['pos']
prob = 0
be = BaseError('target')
be.set_observable(h.get_target(),h.get_query())
if i != 0 and pos == 0: # check for a total deletion before
prev = x['prev-hpa']
if len(prev.get_target()) == 0: # total insertion
ilen = len(prev.get_query())
be.set_unobserved_before(0,len(prev.get_query()),prev.get_query()[0],0.5)
if i != len(self._target_hpas)-1 and pos == len(h.get_target())-1: # check for a total deletion before
if x['next-hpa']:
foll = x['next-hpa']
if len(foll.get_target()) == 0: # total insertion
be.set_unobserved_after(0,len(foll.get_query()),foll.get_query()[0],0.5)
return be | Just get a single error characterization based on the index relative to the target
:param i: list index
:type i: int
:returns: base-wise error
:rtype: HPA group description | Below is the the instruction that describes the task:
### Input:
Just get a single error characterization based on the index relative to the target
:param i: list index
:type i: int
:returns: base-wise error
:rtype: HPA group description
### Response:
def get_target_error(self,i):
"""Just get a single error characterization based on the index relative to the target
:param i: list index
:type i: int
:returns: base-wise error
:rtype: HPA group description
"""
x = self._target_hpas[i]
h = x['hpa']
pos = x['pos']
prob = 0
be = BaseError('target')
be.set_observable(h.get_target(),h.get_query())
if i != 0 and pos == 0: # check for a total deletion before
prev = x['prev-hpa']
if len(prev.get_target()) == 0: # total insertion
ilen = len(prev.get_query())
be.set_unobserved_before(0,len(prev.get_query()),prev.get_query()[0],0.5)
if i != len(self._target_hpas)-1 and pos == len(h.get_target())-1: # check for a total deletion before
if x['next-hpa']:
foll = x['next-hpa']
if len(foll.get_target()) == 0: # total insertion
be.set_unobserved_after(0,len(foll.get_query()),foll.get_query()[0],0.5)
return be |
def autopilot_version_send(self, capabilities, flight_sw_version, middleware_sw_version, os_sw_version, board_version, flight_custom_version, middleware_custom_version, os_custom_version, vendor_id, product_id, uid, force_mavlink1=False):
'''
Version and capability of autopilot software
capabilities : bitmask of capabilities (see MAV_PROTOCOL_CAPABILITY enum) (uint64_t)
flight_sw_version : Firmware version number (uint32_t)
middleware_sw_version : Middleware version number (uint32_t)
os_sw_version : Operating system version number (uint32_t)
board_version : HW / board version (last 8 bytes should be silicon ID, if any) (uint32_t)
flight_custom_version : Custom version field, commonly the first 8 bytes of the git hash. This is not an unique identifier, but should allow to identify the commit using the main version number even for very large code bases. (uint8_t)
middleware_custom_version : Custom version field, commonly the first 8 bytes of the git hash. This is not an unique identifier, but should allow to identify the commit using the main version number even for very large code bases. (uint8_t)
os_custom_version : Custom version field, commonly the first 8 bytes of the git hash. This is not an unique identifier, but should allow to identify the commit using the main version number even for very large code bases. (uint8_t)
vendor_id : ID of the board vendor (uint16_t)
product_id : ID of the product (uint16_t)
uid : UID if provided by hardware (uint64_t)
'''
return self.send(self.autopilot_version_encode(capabilities, flight_sw_version, middleware_sw_version, os_sw_version, board_version, flight_custom_version, middleware_custom_version, os_custom_version, vendor_id, product_id, uid), force_mavlink1=force_mavlink1) | Version and capability of autopilot software
capabilities : bitmask of capabilities (see MAV_PROTOCOL_CAPABILITY enum) (uint64_t)
flight_sw_version : Firmware version number (uint32_t)
middleware_sw_version : Middleware version number (uint32_t)
os_sw_version : Operating system version number (uint32_t)
board_version : HW / board version (last 8 bytes should be silicon ID, if any) (uint32_t)
flight_custom_version : Custom version field, commonly the first 8 bytes of the git hash. This is not an unique identifier, but should allow to identify the commit using the main version number even for very large code bases. (uint8_t)
middleware_custom_version : Custom version field, commonly the first 8 bytes of the git hash. This is not an unique identifier, but should allow to identify the commit using the main version number even for very large code bases. (uint8_t)
os_custom_version : Custom version field, commonly the first 8 bytes of the git hash. This is not an unique identifier, but should allow to identify the commit using the main version number even for very large code bases. (uint8_t)
vendor_id : ID of the board vendor (uint16_t)
product_id : ID of the product (uint16_t)
uid : UID if provided by hardware (uint64_t) | Below is the the instruction that describes the task:
### Input:
Version and capability of autopilot software
capabilities : bitmask of capabilities (see MAV_PROTOCOL_CAPABILITY enum) (uint64_t)
flight_sw_version : Firmware version number (uint32_t)
middleware_sw_version : Middleware version number (uint32_t)
os_sw_version : Operating system version number (uint32_t)
board_version : HW / board version (last 8 bytes should be silicon ID, if any) (uint32_t)
flight_custom_version : Custom version field, commonly the first 8 bytes of the git hash. This is not an unique identifier, but should allow to identify the commit using the main version number even for very large code bases. (uint8_t)
middleware_custom_version : Custom version field, commonly the first 8 bytes of the git hash. This is not an unique identifier, but should allow to identify the commit using the main version number even for very large code bases. (uint8_t)
os_custom_version : Custom version field, commonly the first 8 bytes of the git hash. This is not an unique identifier, but should allow to identify the commit using the main version number even for very large code bases. (uint8_t)
vendor_id : ID of the board vendor (uint16_t)
product_id : ID of the product (uint16_t)
uid : UID if provided by hardware (uint64_t)
### Response:
def autopilot_version_send(self, capabilities, flight_sw_version, middleware_sw_version, os_sw_version, board_version, flight_custom_version, middleware_custom_version, os_custom_version, vendor_id, product_id, uid, force_mavlink1=False):
'''
Version and capability of autopilot software
capabilities : bitmask of capabilities (see MAV_PROTOCOL_CAPABILITY enum) (uint64_t)
flight_sw_version : Firmware version number (uint32_t)
middleware_sw_version : Middleware version number (uint32_t)
os_sw_version : Operating system version number (uint32_t)
board_version : HW / board version (last 8 bytes should be silicon ID, if any) (uint32_t)
flight_custom_version : Custom version field, commonly the first 8 bytes of the git hash. This is not an unique identifier, but should allow to identify the commit using the main version number even for very large code bases. (uint8_t)
middleware_custom_version : Custom version field, commonly the first 8 bytes of the git hash. This is not an unique identifier, but should allow to identify the commit using the main version number even for very large code bases. (uint8_t)
os_custom_version : Custom version field, commonly the first 8 bytes of the git hash. This is not an unique identifier, but should allow to identify the commit using the main version number even for very large code bases. (uint8_t)
vendor_id : ID of the board vendor (uint16_t)
product_id : ID of the product (uint16_t)
uid : UID if provided by hardware (uint64_t)
'''
return self.send(self.autopilot_version_encode(capabilities, flight_sw_version, middleware_sw_version, os_sw_version, board_version, flight_custom_version, middleware_custom_version, os_custom_version, vendor_id, product_id, uid), force_mavlink1=force_mavlink1) |
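A hedged usage sketch with pymavlink: the connection string and version numbers are illustrative, the keyword names follow the signature shown above, and the three *_custom_version fields are passed as 8-byte arrays per their uint8_t[8] type.
from pymavlink import mavutil

conn = mavutil.mavlink_connection('udpout:127.0.0.1:14550')   # illustrative endpoint
conn.mav.autopilot_version_send(
    capabilities=0,
    flight_sw_version=0x010A0000,
    middleware_sw_version=0,
    os_sw_version=0,
    board_version=1,
    flight_custom_version=[0] * 8,
    middleware_custom_version=[0] * 8,
    os_custom_version=[0] * 8,
    vendor_id=0,
    product_id=0,
    uid=0,
)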
def get_array_size(self):
"""Attempts to parse array size out of the address"""
match = re.search(r"(?<=\[)\d+(?=\])", self.get_address())
return int(match.group(0)) | Attempts to parse array size out of the address | Below is the the instruction that describes the task:
### Input:
Attempts to parse array size out of the address
### Response:
def get_array_size(self):
"""Attempts to parse array size out of the address"""
match = re.search(r"(?<=\[)\d+(?=\])", self.get_address())
return int(match.group(0)) |
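The lookbehind/lookahead pair in isolation, checked against a couple of illustrative addresses; addresses without a bracketed suffix simply yield no match.
import re

for address in ('0x1000[16]', 'regs.gpio[4]', 'plain_symbol'):
    match = re.search(r"(?<=\[)\d+(?=\])", address)
    print(address, '->', int(match.group(0)) if match else None)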
def msg_curse(self, args=None, max_width=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
# Only process if stats exist, not empty (issue #871) and plugin not disabled
if not self.stats or (self.stats == {}) or self.is_disable():
return ret
# Build the string message
# Header
msg = '{:8}'.format('LOAD')
ret.append(self.curse_add_line(msg, "TITLE"))
# Core number
if 'cpucore' in self.stats and self.stats['cpucore'] > 0:
msg = '{}-core'.format(int(self.stats['cpucore']))
ret.append(self.curse_add_line(msg))
# New line
ret.append(self.curse_new_line())
# 1min load
msg = '{:8}'.format('1 min:')
ret.append(self.curse_add_line(msg))
msg = '{:>6.2f}'.format(self.stats['min1'])
ret.append(self.curse_add_line(msg))
# New line
ret.append(self.curse_new_line())
# 5min load
msg = '{:8}'.format('5 min:')
ret.append(self.curse_add_line(msg))
msg = '{:>6.2f}'.format(self.stats['min5'])
ret.append(self.curse_add_line(
msg, self.get_views(key='min5', option='decoration')))
# New line
ret.append(self.curse_new_line())
# 15min load
msg = '{:8}'.format('15 min:')
ret.append(self.curse_add_line(msg))
msg = '{:>6.2f}'.format(self.stats['min15'])
ret.append(self.curse_add_line(
msg, self.get_views(key='min15', option='decoration')))
return ret | Return the dict to display in the curse interface. | Below is the the instruction that describes the task:
### Input:
Return the dict to display in the curse interface.
### Response:
def msg_curse(self, args=None, max_width=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
# Only process if stats exist, not empty (issue #871) and plugin not disabled
if not self.stats or (self.stats == {}) or self.is_disable():
return ret
# Build the string message
# Header
msg = '{:8}'.format('LOAD')
ret.append(self.curse_add_line(msg, "TITLE"))
# Core number
if 'cpucore' in self.stats and self.stats['cpucore'] > 0:
msg = '{}-core'.format(int(self.stats['cpucore']))
ret.append(self.curse_add_line(msg))
# New line
ret.append(self.curse_new_line())
# 1min load
msg = '{:8}'.format('1 min:')
ret.append(self.curse_add_line(msg))
msg = '{:>6.2f}'.format(self.stats['min1'])
ret.append(self.curse_add_line(msg))
# New line
ret.append(self.curse_new_line())
# 5min load
msg = '{:8}'.format('5 min:')
ret.append(self.curse_add_line(msg))
msg = '{:>6.2f}'.format(self.stats['min5'])
ret.append(self.curse_add_line(
msg, self.get_views(key='min5', option='decoration')))
# New line
ret.append(self.curse_new_line())
# 15min load
msg = '{:8}'.format('15 min:')
ret.append(self.curse_add_line(msg))
msg = '{:>6.2f}'.format(self.stats['min15'])
ret.append(self.curse_add_line(
msg, self.get_views(key='min15', option='decoration')))
return ret |
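The column layout comes entirely from the two format specs used above; a quick standalone check of how they line up.
for label, value in (('1 min:', 0.42), ('5 min:', 1.5), ('15 min:', 12.125)):
    print('{:8}{:>6.2f}'.format(label, value))
# first line: '1 min:    0.42' -- an 8-wide label column, then a right-aligned 6.2f value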
def write_template_to_file(conf, template_body):
"""Writes the template to disk
"""
template_file_name = _get_stack_name(conf) + '-generated-cf-template.json'
with open(template_file_name, 'w') as opened_file:
opened_file.write(template_body)
print('wrote cf-template for %s to disk: %s' % (
get_env(), template_file_name))
return template_file_name | Writes the template to disk | Below is the the instruction that describes the task:
### Input:
Writes the template to disk
### Response:
def write_template_to_file(conf, template_body):
"""Writes the template to disk
"""
template_file_name = _get_stack_name(conf) + '-generated-cf-template.json'
with open(template_file_name, 'w') as opened_file:
opened_file.write(template_body)
print('wrote cf-template for %s to disk: %s' % (
get_env(), template_file_name))
return template_file_name |