def close(self) -> Awaitable[None]:
"""Close all ongoing DNS calls."""
for ev in self._throttle_dns_events.values():
ev.cancel()
    return super().close()

def pairplot(dataset, vars, filename, bins=60):
""" Plot a matrix of the specified variables with all the 2D pdfs and 1D pdfs.
"""
n = len(vars)
fig, axes = plt.subplots(nrows=n, ncols=n)
plt.subplots_adjust(wspace=0.1, hspace=0.1)
for i, x in enumerate(vars):
for j, y in enumerate(vars):
            print(((x, y), (i, j)))  # debug trace of the panel being drawn
ax = axes[j,i]
if j < i:
ax.axis('off')
continue
elif i == j:
P = posterior.oneD(dataset+'.h5', x, limits=limits(x), bins=bins)
P.plot(ax)
ax.set_xlim(limits(x))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.set_yticks([])
else:
P = posterior.twoD(dataset+'.h5', x, y,
xlimits=limits(x), ylimits=limits(y), xbins=bins, ybins=bins)
# apply some gaussian smoothing to make the contours slightly smoother
sigmas = (np.diff(P.ycenters)[0], np.diff(P.xcenters)[0])
P.pdf = gaussian_filter(P.pdf, sigmas, mode='nearest')
P.plot(ax, levels=np.linspace(0.9, 0.1, 9))
ax.set_xlim(limits(x))
ax.set_ylim(limits(y))
# now we clean up labels, ticks and such
leftmostcol = i == 0
bottomrow = j == n-1
ax.set_xlabel(labels(x) if bottomrow else '')
ax.set_ylabel(labels(y) if leftmostcol else '')
if not leftmostcol:
ax.set_yticklabels([])
if not bottomrow:
ax.set_xticklabels([])
fig.set_size_inches(n*4,n*4)
fig.savefig(filename, dpi=200, bbox_inches='tight')
    plt.close(fig)

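# Hypothetical call, to illustrate the expected inputs; `limits`, `labels`,
# and `posterior` are module-level helpers assumed to exist alongside
# pairplot, and the dataset name is illustrative:
#
#     pairplot('run1', ['mass', 'spin', 'distance'], 'corner.png', bins=40)
#     # reads run1.h5 and writes a 3x3 corner plot to corner.png
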
def convert(gr, raw_node):
"""
Convert raw node information to a Node or Leaf instance.
This is passed to the parser driver which calls it whenever a reduction of a
    grammar rule produces a new complete node, so that the tree is built
strictly bottom-up.
"""
type, value, context, children = raw_node
if children or type in gr.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
if len(children) == 1:
return children[0]
return Node(type, children, context=context)
else:
        return Leaf(type, value, context=context)

def bytes(num, check_result=False):
"""
Returns num bytes of cryptographically strong pseudo-random
    bytes. If check_result is True, raises an error if the PRNG is not
    seeded sufficiently.
"""
if num <= 0:
raise ValueError("'num' should be > 0")
buf = create_string_buffer(num)
result = libcrypto.RAND_bytes(buf, num)
if check_result and result == 0:
raise RandError("Random Number Generator not seeded sufficiently")
    return buf.raw[:num]

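# Usage sketch; note that within its module this function shadows the
# built-in `bytes`, and it relies on the ctypes `libcrypto` handle loaded
# elsewhere in that module:
#
#     iv = bytes(16)                       # 16 random bytes
#     key = bytes(32, check_result=True)   # RandError if the PRNG is unseeded
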
def cluster(self, window):
""" Cluster the dict array, assuming it has the relevant Coinc colums,
time1, time2, stat, and timeslide_id
"""
# If no events, do nothing
if len(self.time1) == 0 or len(self.time2) == 0:
return self
from pycbc.events import cluster_coincs
interval = self.attrs['timeslide_interval']
cid = cluster_coincs(self.stat, self.time1, self.time2,
self.timeslide_id, interval, window)
    return self.select(cid)

def get_cash_asset_class(self) -> AssetClass:
""" Find the cash asset class by name. """
for ac in self.asset_classes:
if ac.name.lower() == "cash":
return ac
    return None

def set_fun_prop(f, k, v):
"""Set the value of property `k` to be `v` in function `f`.
We define properties as annotations added to a function throughout
the process of defining a function for verification, e.g. the
argument types. This sets function `f`'s property named `k` to be
value `v`.
Users should never access this function directly.
"""
if not hasattr(f, _FUN_PROPS):
setattr(f, _FUN_PROPS, {})
if not isinstance(getattr(f, _FUN_PROPS), dict):
raise InternalError("Invalid properties dictionary for %s" % str(f))
    getattr(f, _FUN_PROPS)[k] = v

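# A natural counterpart, sketched under the same conventions; _FUN_PROPS and
# InternalError come from the surrounding module, and this getter is an
# assumption rather than part of the original snippet.
def get_fun_prop(f, k):
    """Return function `f`'s property `k`, raising if it was never set."""
    if not hasattr(f, _FUN_PROPS) or k not in getattr(f, _FUN_PROPS):
        raise InternalError("Function %s has no property %s" % (str(f), str(k)))
    return getattr(f, _FUN_PROPS)[k]
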
def subscribe_condition_fulfilled(self, agreement_id, timeout, callback, args,
timeout_callback=None, wait=False):
"""
        Subscribe to the condition fulfilled event.
:param agreement_id: id of the agreement, hex str
:param timeout:
:param callback:
:param args:
:param timeout_callback:
        :param wait: if True, block the listener until the event is received, bool
:return:
"""
logger.info(
f'Subscribing {self.FULFILLED_EVENT} event with agreement id {agreement_id}.')
return self.subscribe_to_event(
self.FULFILLED_EVENT,
timeout,
{'_agreementId': Web3Provider.get_web3().toBytes(hexstr=agreement_id)},
callback=callback,
timeout_callback=timeout_callback,
args=args,
wait=wait
        )

def joinArgs(args):
""" Returns a query string (uses for HTTP URLs) where only the value is URL encoded.
Example return value: '?genre=action&type=1337'.
Parameters:
args (dict): Arguments to include in query string.
"""
if not args:
return ''
arglist = []
for key in sorted(args, key=lambda x: x.lower()):
value = compat.ustr(args[key])
arglist.append('%s=%s' % (key, compat.quote(value)))
    return '?%s' % '&'.join(arglist)

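# Expected behavior, following the docstring above: keys are sorted
# case-insensitively and only values are percent-encoded.
#
#     >>> joinArgs({'type': 1337, 'genre': 'sci fi'})
#     '?genre=sci%20fi&type=1337'
#     >>> joinArgs({})
#     ''
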
def calc_riseset(t, target_name, location, prev_next, rise_set, horizon):
"""
Time at next rise/set of ``target``.
Parameters
----------
t : `~astropy.time.Time` or other (see below)
Time of observation. This will be passed in as the first argument to
the `~astropy.time.Time` initializer, so it can be anything that
`~astropy.time.Time` will accept (including a `~astropy.time.Time`
object)
target_name : str
'moon' or 'sun'
location : `~astropy.coordinates.EarthLocation`
Observatory location
prev_next : str - either 'previous' or 'next'
Test next rise/set or previous rise/set
rise_set : str - either 'rising' or 'setting'
Compute prev/next rise or prev/next set
horizon : `~astropy.units.Quantity`
Degrees above/below actual horizon to use
for calculating rise/set times (i.e.,
-6 deg horizon = civil twilight, etc.)
Returns
-------
ret1 : `~astropy.time.Time`
Time of rise/set
"""
target = coord.get_body(target_name, t)
t0 = _rise_set_trig(t, target, location, prev_next, rise_set)
grid = t0 + np.linspace(-4*u.hour, 4*u.hour, 10)
altaz_frame = coord.AltAz(obstime=grid, location=location)
target = coord.get_body(target_name, grid)
altaz = target.transform_to(altaz_frame)
time_limits, altitude_limits = _horiz_cross(altaz.obstime, altaz.alt,
rise_set, horizon)
    return _two_point_interp(time_limits, altitude_limits, horizon)

def _start_of_decade(self):
"""
Reset the date to the first day of the decade.
:rtype: Date
"""
year = self.year - self.year % YEARS_PER_DECADE
    return self.set(year, 1, 1)

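# The year arithmetic above, checked in isolation (assuming YEARS_PER_DECADE
# is 10, as the name suggests): 2010 -> 2010, 2014 -> 2010, 2019 -> 2010 via
# year - year % 10, so e.g. a Date of 2017-06-15 resets to 2010-01-01.
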
def create_image_summary(name, val):
"""
Args:
name(str):
        val(np.ndarray): 4D tensor in NHWC layout. Assumed to be RGB if C==3.
Can be either float or uint8. Range has to be [0,255].
Returns:
tf.Summary:
"""
assert isinstance(name, six.string_types), type(name)
n, h, w, c = val.shape
val = val.astype('uint8')
s = tf.Summary()
imparams = [cv2.IMWRITE_PNG_COMPRESSION, 9]
for k in range(n):
arr = val[k]
        # OpenCV will only write correctly in BGR channel order
if c == 3:
arr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)
elif c == 4:
arr = cv2.cvtColor(arr, cv2.COLOR_RGBA2BGRA)
tag = name if n == 1 else '{}/{}'.format(name, k)
retval, img_str = cv2.imencode('.png', arr, imparams)
if not retval:
# Encoding has failed.
continue
        img_str = img_str.tobytes()  # tostring() is deprecated/removed in newer NumPy
img = tf.Summary.Image()
img.height = h
img.width = w
# 1 - grayscale 3 - RGB 4 - RGBA
img.colorspace = c
img.encoded_image_string = img_str
s.value.add(tag=tag, image=img)
    return s

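# Minimal usage sketch (assumes TF1-style summaries; `writer` and `step` are
# illustrative names for an existing tf.summary.FileWriter and global step):
#
#     imgs = np.random.randint(0, 255, size=(4, 64, 64, 3), dtype='uint8')
#     summ = create_image_summary('samples', imgs)
#     writer.add_summary(summ, global_step=step)
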
def read_serialized_rsa_pub_key(serialized):
"""
Reads serialized RSA pub key
TAG|len-2B|value. 81 = exponent, 82 = modulus
:param serialized:
:return: n, e
"""
n = None
e = None
rsa = from_hex(serialized)
pos = 0
ln = len(rsa)
while pos < ln:
tag = bytes_to_byte(rsa, pos)
pos += 1
length = bytes_to_short(rsa, pos)
pos += 2
if tag == 0x81:
e = bytes_to_long(rsa[pos:pos+length])
elif tag == 0x82:
n = bytes_to_long(rsa[pos:pos+length])
pos += length
if e is None or n is None:
logger.warning("Could not process import key")
raise ValueError('Public key deserialization failed')
    return n, e

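# Worked example of the TAG|len-2B|value layout parsed above, in hex:
#   81 0003 010001    -> tag 0x81, length 3, exponent e = 65537
#   82 0004 00BC614E  -> tag 0x82, length 4, modulus  n = 12345678
# so read_serialized_rsa_pub_key('81000301000182000400BC614E')
# returns (12345678, 65537).
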
def clean_dateobject_to_string(x):
"""Convert a Pandas Timestamp object or datetime object
to 'YYYY-MM-DD' string
Parameters
----------
x : str, list, tuple, numpy.ndarray, pandas.DataFrame
A Pandas Timestamp object or datetime object,
or an array of these objects
Returns
-------
y : str, list, tuple, numpy.ndarray, pandas.DataFrame
A string 'YYYY-MM-DD' or array of date strings.
Example
-------
    The function aims to convert a date object as follows
Timestamp('2014-09-23 00:00:00') => '2014-09-23'
datetime.datetime(2014,9,23,0,0) => '2014-09-23'
Code Example
------------
print(clean_dateobject_to_string(pd.Timestamp('2014-09-23 00:00:00')))
'2014-09-23'
print(clean_dateobject_to_string(datetime(2014,9,23,0,0)))
'2014-09-23'
Behavior
--------
    - If it is not an object with a strftime function, None is returned
"""
import numpy as np
import pandas as pd
def proc_elem(e):
try:
return e.strftime("%Y-%m-%d")
except Exception as e:
print(e)
return None
def proc_list(x):
return [proc_elem(e) for e in x]
def proc_ndarray(x):
tmp = proc_list(list(x.reshape((x.size,))))
return np.array(tmp).reshape(x.shape)
# transform string, list/tuple, numpy array, pandas dataframe
if "strftime" in dir(x):
return proc_elem(x)
elif isinstance(x, (list, tuple)):
return proc_list(x)
elif isinstance(x, np.ndarray):
return proc_ndarray(x)
elif isinstance(x, pd.DataFrame):
return pd.DataFrame(proc_ndarray(x.values),
columns=x.columns,
index=x.index)
else:
        return None

def main(): # mini/unit test
"""
PURPOSE: command-line interface for map information
"""
options = optionsParser().parse_args()
params = getSelectionParams(options)
if options.list or options.details:
specifiedMaps = filterMapNames(
options.mapname,
            records=filterMapAttrs(**params),
excludeRegex=options.exclude,
closestMatch=options.best
)
if specifiedMaps:
for v in specifiedMaps:
if options.details: v.display()
else: print(v)
print("Found %d maps that match given criteria."%(len(specifiedMaps)))
else:
print("No matching maps found.")
else:
try:
specifiedMaps = selectMap(
options.mapname,
                excludeName=options.exclude,
closestMatch=options.best,
**params)
except Exception as e:
specifiedMaps = []
print("No matching maps found: %s"%e)
if not isinstance(specifiedMaps, list):
specifiedMaps = [specifiedMaps]
for m in specifiedMaps:
if options.path: print(m.path)
        else: print(m.name)

def _query_entities(self, table_name, filter=None, select=None, max_results=None,
marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
property_resolver=None, timeout=None):
'''
Returns a list of entities under the specified table. Makes a single list
request to the service. Used internally by the query_entities method.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
        :param int max_results:
The maximum number of entities to return.
:param marker:
A dictionary which identifies the portion of the query to be
returned with the next query operation. The operation returns a
next_marker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
queues. The marker value is opaque to the client.
:type marker: obj
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds.
:return: A list of entities, potentially with a next_marker property.
:rtype: list of :class:`~azure.storage.table.models.Entity`
'''
_validate_not_none('table_name', table_name)
_validate_not_none('accept', accept)
next_partition_key = None if marker is None else marker.get('nextpartitionkey')
next_row_key = None if marker is None else marker.get('nextrowkey')
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _to_str(table_name) + '()'
request.headers = [('Accept', _to_str(accept))]
request.query = [
('$filter', _to_str(filter)),
('$select', _to_str(select)),
('$top', _int_to_str(max_results)),
('NextPartitionKey', _to_str(next_partition_key)),
('NextRowKey', _to_str(next_row_key)),
('timeout', _int_to_str(timeout)),
]
response = self._perform_request(request)
    return _convert_json_response_to_entities(response, property_resolver)

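# Hedged usage sketch of this internal helper; `table_service` and the table
# name are illustrative, and pagination resumes by passing the returned
# marker back in on the next call:
#
#     entities = table_service._query_entities(
#         'tasks',
#         filter="PartitionKey eq 'batch1'",
#         select='RowKey,Status',
#         max_results=100)
#     marker = getattr(entities, 'next_marker', None)
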
def on_nick(self, connection, event):
"""
Someone changed their nickname - send the nicknames list to the
WebSocket.
"""
old_nickname = self.get_nickname(event)
old_color = self.nicknames.pop(old_nickname)
new_nickname = event.target()
message = "is now known as %s" % new_nickname
self.namespace.emit("message", old_nickname, message, old_color)
new_color = color(new_nickname)
self.nicknames[new_nickname] = new_color
self.emit_nicknames()
if self.nickname == old_nickname:
        self.nickname = new_nickname

def update_stack(self, fqn, template, old_parameters, parameters, tags,
force_interactive=False, force_change_set=False,
stack_policy=None, **kwargs):
"""Update a Cloudformation stack.
Args:
fqn (str): The fully qualified name of the Cloudformation stack.
template (:class:`stacker.providers.base.Template`): A Template
object to use when updating the stack.
old_parameters (list): A list of dictionaries that defines the
parameter list on the existing Cloudformation stack.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack.
force_interactive (bool): A flag that indicates whether the update
should be interactive. If set to True, interactive mode will
be used no matter if the provider is in interactive mode or
not. False will follow the behavior of the provider.
force_change_set (bool): A flag that indicates whether the update
must be executed with a change set.
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
"""
logger.debug("Attempting to update stack %s:", fqn)
logger.debug(" parameters: %s", parameters)
logger.debug(" tags: %s", tags)
if template.url:
logger.debug(" template_url: %s", template.url)
else:
logger.debug(" no template url, uploading template directly.")
update_method = self.select_update_method(force_interactive,
force_change_set)
return update_method(fqn, template, old_parameters, parameters,
                         stack_policy=stack_policy, tags=tags, **kwargs)

def unravel_sections(section_data):
"""Unravels section type dictionary into flat list of sections with
section type set as an attribute.
Args:
        section_data(dict): Data returned from py:method::get_sections
Returns:
list: Flat list of sections with ``sectionType`` set to
type (i.e. recitation, lecture, etc)
"""
sections = []
for type, subsection_list in section_data.items():
for section in subsection_list:
section['sectionType'] = type
sections.append(section)
    return sections

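# Self-contained example; the input shape (type -> list of section dicts)
# follows the docstring above.
section_data = {
    'lecture': [{'id': 'L01'}, {'id': 'L02'}],
    'recitation': [{'id': 'R01'}],
}
flat = unravel_sections(section_data)
# -> [{'id': 'L01', 'sectionType': 'lecture'},
#     {'id': 'L02', 'sectionType': 'lecture'},
#     {'id': 'R01', 'sectionType': 'recitation'}]
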
def alter_function(self, dbName, funcName, newFunc):
"""
Parameters:
- dbName
- funcName
- newFunc
"""
self.send_alter_function(dbName, funcName, newFunc)
    self.recv_alter_function()

def mavlink_packet(self, m):
'''handle a mavlink packet'''
mtype = m.get_type()
if mtype == "SYS_STATUS":
self.battery_update(m)
if mtype == "BATTERY2":
self.battery2_voltage = m.voltage * 0.001
if mtype == "POWER_STATUS":
self.power_status_update(m)
if self.battery_period.trigger():
        self.battery_report()

def put(f, s3_path, multipart_chunk_size_mb=500, logger=None):
'''
Uploads a single file to S3, using s3cmd.
Args:
f (str): Path to a single file.
s3_path (str): The S3 path, with the filename omitted. The S3 filename
will be the basename of the ``f``. For example::
put(f='/path/to/myfile.tar.gz', s3_path='s3://my_bucket/path/to/')
will result in an uploaded S3 path of ``s3://my_bucket/path/to/myfile.tar.gz``
'''
if not logger:
logger = log.get_logger('s3')
fname = os.path.basename(f)
target = os.path.join(s3_path, fname)
s3cmd_cline = 's3cmd put {} {} --multipart-chunk-size-mb {}'.format(f,
target,
multipart_chunk_size_mb)
print_put_info(fname, target, logger)
s3cmd = sp.Popen(s3cmd_cline,
stdout=sp.PIPE,
stderr=sp.PIPE,
shell=True)
    stdout, stderr = s3cmd.communicate()

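# Hypothetical call (requires the s3cmd CLI on PATH with configured
# credentials; the bucket and paths are illustrative):
#
#     put('/data/archive.tar.gz', 's3://my-bucket/backups/')
#     # uploads to s3://my-bucket/backups/archive.tar.gz in 500 MB chunks
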
def close(self) -> None:
"""Closes connection to the LifeSOS ethernet interface."""
self.cancel_pending_tasks()
_LOGGER.debug("Disconnected")
if self._transport:
self._transport.close()
    self._is_connected = False

def set_misc(self):
'''Set more parameters, after the tank is better defined than in the
__init__ function.
Notes
-----
Two of D, L, and L_over_D must be known when this function runs.
The other one is set from the other two first thing in this function.
a_ratio parameters are used to calculate a values for the heads here,
if applicable.
Radius is calculated here.
Maximum tank height is calculated here.
V_total is calculated here.
'''
if self.D and self.L:
# If L and D are known, get L_over_D
self.L_over_D = self.L/self.D
elif self.D and self.L_over_D:
# Otherwise, if L_over_D and D are provided, get L
self.L = self.D*self.L_over_D
elif self.L and self.L_over_D:
# Otherwise, if L_over_D and L are provided, get D
self.D = self.L/self.L_over_D
    # Calculate the radius from the diameter
self.R = self.D/2.
# If a_ratio is provided for either heads, use it.
if self.sideA and self.D:
if not self.sideA_a and self.sideA in ['conical', 'ellipsoidal', 'guppy', 'spherical']:
self.sideA_a = self.D*self.sideA_a_ratio
if self.sideB and self.D:
if not self.sideB_a and self.sideB in ['conical', 'ellipsoidal', 'guppy', 'spherical']:
self.sideB_a = self.D*self.sideB_a_ratio
# Calculate a for torispherical heads
if self.sideA == 'torispherical' and self.sideA_f and self.sideA_k:
self.sideA_a = a_torispherical(self.D, self.sideA_f, self.sideA_k)
if self.sideB == 'torispherical' and self.sideB_f and self.sideB_k:
self.sideB_a = a_torispherical(self.D, self.sideB_f, self.sideB_k)
# Calculate maximum tank height, h_max
if self.horizontal:
self.h_max = self.D
else:
self.h_max = self.L
if self.sideA_a:
self.h_max += self.sideA_a
if self.sideB_a:
self.h_max += self.sideB_a
    # Compute the total volume at the maximum height
self.V_total = self.V_from_h(self.h_max)
# Set surface areas
self.A, (self.A_sideA, self.A_sideB, self.A_lateral) = SA_tank(
D=self.D, L=self.L, sideA=self.sideA, sideB=self.sideB, sideA_a=self.sideA_a,
sideB_a=self.sideB_a, sideA_f=self.sideA_f, sideA_k=self.sideA_k,
sideB_f=self.sideB_f, sideB_k=self.sideB_k,
        full_output=True)

def encode(self):
"""Encode a GNTP Registration Message
:return string: Encoded GNTP Registration message. Returned as a byte string
"""
buff = _GNTPBuffer()
buff.writeln(self._format_info())
#Headers
for k, v in self.headers.items():
buff.writeheader(k, v)
buff.writeln()
#Notifications
if len(self.notifications) > 0:
for notice in self.notifications:
for k, v in notice.items():
buff.writeheader(k, v)
buff.writeln()
#Resources
for resource, data in self.resources.items():
buff.writeheader('Identifier', resource)
buff.writeheader('Length', len(data))
buff.writeln()
buff.write(data)
buff.writeln()
buff.writeln()
    return buff.getvalue()

def connectionLost(self, reason):
"""
If a login has happened, perform a logout.
"""
AMP.connectionLost(self, reason)
if self.logout is not None:
self.logout()
        self.boxReceiver = self.logout = None

def display_value(self, ctx, value):
""" Display value to be used for this parameter. """
gandi = ctx.obj
gandi.log('%s: %s' % (self.name, (value if value is not None
else 'Not found'))) | Display value to be used for this parameter. | Below is the the instruction that describes the task:
### Input:
Display value to be used for this parameter.
### Response:
def display_value(self, ctx, value):
""" Display value to be used for this parameter. """
gandi = ctx.obj
gandi.log('%s: %s' % (self.name, (value if value is not None
else 'Not found'))) |
def main():
"""
Loop over a list of input text strings. Parse each string using a list of
parsers, one included in megaparsex and one defined in this script. If a
confirmation is requested, seek confirmation, otherwise display any response
text and engage any triggered functions.
"""
for text in [
"how are you",
"ip address",
"restart",
"run command",
"rain EGPF",
"reverse SSH"
]:
print("\nparse text: " + text + "\nWait 3 seconds, then parse.")
time.sleep(3)
response = megaparsex.multiparse(
text = text,
parsers = [
megaparsex.parse,
parse_networking
],
help_message = "Does not compute. I can report my IP address and I "
"can restart my script."
)
if type(response) is megaparsex.confirmation:
while response.confirmed() is None:
response.test(
text = megaparsex.get_input(
prompt = response.prompt() + " "
)
)
if response.confirmed():
print(response.feedback())
response.run()
else:
print(response.feedback())
elif type(response) is megaparsex.command:
output = response.engage_command(
command = megaparsex.get_input(
prompt = response.prompt() + " "
),
background = False
)
if output:
print("output:\n{output}".format(output = output))
else:
print(response) | Loop over a list of input text strings. Parse each string using a list of
parsers, one included in megaparsex and one defined in this script. If a
confirmation is requested, seek confirmation, otherwise display any response
text and engage any triggered functions. | Below is the instruction that describes the task:
### Input:
Loop over a list of input text strings. Parse each string using a list of
parsers, one included in megaparsex and one defined in this script. If a
confirmation is requested, seek confirmation, otherwise display any response
text and engage any triggered functions.
### Response:
def main():
"""
Loop over a list of input text strings. Parse each string using a list of
parsers, one included in megaparsex and one defined in this script. If a
confirmation is requested, seek confirmation, otherwise display any response
text and engage any triggered functions.
"""
for text in [
"how are you",
"ip address",
"restart",
"run command",
"rain EGPF",
"reverse SSH"
]:
print("\nparse text: " + text + "\nWait 3 seconds, then parse.")
time.sleep(3)
response = megaparsex.multiparse(
text = text,
parsers = [
megaparsex.parse,
parse_networking
],
help_message = "Does not compute. I can report my IP address and I "
"can restart my script."
)
if type(response) is megaparsex.confirmation:
while response.confirmed() is None:
response.test(
text = megaparsex.get_input(
prompt = response.prompt() + " "
)
)
if response.confirmed():
print(response.feedback())
response.run()
else:
print(response.feedback())
elif type(response) is megaparsex.command:
output = response.engage_command(
command = megaparsex.get_input(
prompt = response.prompt() + " "
),
background = False
)
if output:
print("output:\n{output}".format(output = output))
else:
print(response) |
def today(self) -> datetime:
""" Returns today (date only) as datetime """
self.value = datetime.combine(datetime.today().date(), time.min)
return self.value | Returns today (date only) as datetime | Below is the the instruction that describes the task:
### Input:
Returns today (date only) as datetime
### Response:
def today(self) -> datetime:
""" Returns today (date only) as datetime """
self.value = datetime.combine(datetime.today().date(), time.min)
return self.value |
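
For illustration, datetime.combine with time.min zeroes out the clock portion; a quick standalone check:

from datetime import datetime, time

midnight = datetime.combine(datetime.today().date(), time.min)
print(midnight)  # e.g. 2024-01-15 00:00:00 -- today's date at 00:00
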
def _parse_value(self, raw):
"""Parses value
:param raw: raw value
:return: Parsed value
"""
try:
if not raw.startswith("0"):
val = float(raw)
if (val % 1) == 0: # integer
val = int(raw)
return str(val)
return self.num_format.format(val)
else:
raise ValueError("Cannot parse int!")
except:
return str(raw) | Parses value
:param raw: raw value
:return: Parsed value | Below is the instruction that describes the task:
### Input:
Parses value
:param raw: raw value
:return: Parsed value
### Response:
def _parse_value(self, raw):
"""Parses value
:param raw: raw value
:return: Parsed value
"""
try:
if not raw.startswith("0"):
val = float(raw)
if (val % 1) == 0: # integer
val = int(raw)
return str(val)
return self.num_format.format(val)
else:
raise ValueError("Cannot parse int!")
except:
return str(raw) |
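
A module-level sketch of the same logic, with a hypothetical NUM_FORMAT standing in for self.num_format, makes the edge cases easy to see:

NUM_FORMAT = '{:.2f}'  # hypothetical stand-in for self.num_format

def parse_value(raw):
    try:
        if not raw.startswith('0'):
            val = float(raw)
            if (val % 1) == 0:           # whole number
                return str(int(raw))     # note: int('4.0') raises, falling to except
            return NUM_FORMAT.format(val)
        raise ValueError('Cannot parse int!')
    except Exception:
        return str(raw)

print(parse_value('42'))       # '42'
print(parse_value('3.14159'))  # '3.14'
print(parse_value('007'))      # '007' -- leading zero, returned verbatim
print(parse_value('4.0'))      # '4.0' -- int('4.0') fails, except path
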
def stop(self):
"""
Restore the TTY to its original state.
"""
_curses.nocbreak()
self.window.keypad(0)
_curses.echo()
_curses.resetty()
_curses.endwin()
self.running = False | Restore the TTY to its original state. | Below is the the instruction that describes the task:
### Input:
Restore the TTY to its original state.
### Response:
def stop(self):
"""
Restore the TTY to its original state.
"""
_curses.nocbreak()
self.window.keypad(0)
_curses.echo()
_curses.resetty()
_curses.endwin()
self.running = False |
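
As a hypothetical counterpart (not shown in the source), the setup this teardown reverses would look roughly like:

import curses as _curses

def start(self):
    # hypothetical setup mirrored by stop(): save TTY state, then go raw
    self.window = _curses.initscr()
    _curses.savetty()       # lets resetty() in stop() restore this state
    _curses.noecho()
    _curses.cbreak()
    self.window.keypad(1)
    self.running = True
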
def _combine_contract(self, years, wages):
"""
Combine the contract wages and year.
Match the wages with the year and add to a dictionary representing the
player's contract.
Parameters
----------
years : list
A list where each element is a string denoting the season, such as
'2017-18'.
wages : list
A list of all wages where each element is a string denoting the
dollar amount, such as '$40,000,000'.
Returns
-------
dictionary
Returns a dictionary representing the player's contract where each
key is a ``string`` of the season, such as '2017-18' and each value
is a ``string`` of the wages, such as '$40,000,000'.
"""
contract = {}
for i in range(len(years)):
contract[years[i]] = wages[i]
return contract | Combine the contract wages and year.
Match the wages with the year and add to a dictionary representing the
player's contract.
Parameters
----------
years : list
A list where each element is a string denoting the season, such as
'2017-18'.
wages : list
A list of all wages where each element is a string denoting the
dollar amount, such as '$40,000,000'.
Returns
-------
dictionary
Returns a dictionary representing the player's contract where each
key is a ``string`` of the season, such as '2017-18' and each value
is a ``string`` of the wages, such as '$40,000,000'. | Below is the instruction that describes the task:
### Input:
Combine the contract wages and year.
Match the wages with the year and add to a dictionary representing the
player's contract.
Parameters
----------
years : list
A list where each element is a string denoting the season, such as
'2017-18'.
wages : list
A list of all wages where each element is a string denoting the
dollar amount, such as '$40,000,000'.
Returns
-------
dictionary
Returns a dictionary representing the player's contract where each
key is a ``string`` of the season, such as '2017-18' and each value
is a ``string`` of the wages, such as '$40,000,000'.
### Response:
def _combine_contract(self, years, wages):
"""
Combine the contract wages and year.
Match the wages with the year and add to a dictionary representing the
player's contract.
Parameters
----------
years : list
A list where each element is a string denoting the season, such as
'2017-18'.
wages : list
A list of all wages where each element is a string denoting the
dollar amount, such as '$40,000,000'.
Returns
-------
dictionary
Returns a dictionary representing the player's contract where each
key is a ``string`` of the season, such as '2017-18' and each value
is a ``string`` of the wages, such as '$40,000,000'.
"""
contract = {}
for i in range(len(years)):
contract[years[i]] = wages[i]
return contract |
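
Since the loop pairs the two lists index-by-index, an equivalent one-liner (assuming years and wages align one-to-one) is:

contract = dict(zip(years, wages))
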
def two_numbers(
cls, request,
operation: (Ptypes.path,
String('One of the 4 arithmetic operations.',
enum=['add', 'sub', 'mul', 'div'])),
first: (Ptypes.path,
Float('The first operand.')),
second: (Ptypes.path,
Float('The second operand.'))) -> [
(200, 'Ok', Float),
(400, 'Wrong number format or invalid operation'),
(422, 'NaN')]:
        '''Any of the four arithmetic operations on two numbers.'''
log.info('Performing {} on {} and {}'.format(operation, first, second))
try:
first = float(first)
second = float(second)
except ValueError:
Respond(400)
if operation == 'add':
Respond(200, first + second)
elif operation == 'sub':
Respond(200, first - second)
elif operation == 'mul':
Respond(200, first * second)
elif operation == 'div':
if second == 0:
Respond(422)
Respond(200, first / second)
else:
Respond(400) | Any of the four arithmetic operation on two numbers. | Below is the the instruction that describes the task:
### Input:
Any of the four arithmetic operations on two numbers.
### Response:
def two_numbers(
cls, request,
operation: (Ptypes.path,
String('One of the 4 arithmetic operations.',
enum=['add', 'sub', 'mul', 'div'])),
first: (Ptypes.path,
Float('The first operand.')),
second: (Ptypes.path,
Float('The second operand.'))) -> [
(200, 'Ok', Float),
(400, 'Wrong number format or invalid operation'),
(422, 'NaN')]:
        '''Any of the four arithmetic operations on two numbers.'''
log.info('Performing {} on {} and {}'.format(operation, first, second))
try:
first = float(first)
second = float(second)
except ValueError:
Respond(400)
if operation == 'add':
Respond(200, first + second)
elif operation == 'sub':
Respond(200, first - second)
elif operation == 'mul':
Respond(200, first * second)
elif operation == 'div':
if second == 0:
Respond(422)
Respond(200, first / second)
else:
Respond(400) |
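
Stripped of the framework's Respond/annotation machinery, the same dispatch can be sketched with the operator module (a hypothetical standalone variant returning (status, result) tuples):

import operator

OPS = {'add': operator.add, 'sub': operator.sub,
       'mul': operator.mul, 'div': operator.truediv}

def two_numbers(operation, first, second):
    try:
        first, second = float(first), float(second)
    except ValueError:
        return 400, None            # wrong number format
    fn = OPS.get(operation)
    if fn is None:
        return 400, None            # invalid operation
    if operation == 'div' and second == 0:
        return 422, None            # division by zero -> NaN case
    return 200, fn(first, second)

print(two_numbers('add', '1.5', '2'))  # (200, 3.5)
print(two_numbers('div', '1', '0'))    # (422, None)
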
def run_on_executor(*args: Any, **kwargs: Any) -> Callable:
"""Decorator to run a synchronous method asynchronously on an executor.
The decorated method may be called with a ``callback`` keyword
argument and returns a future.
The executor to be used is determined by the ``executor``
attributes of ``self``. To use a different attribute name, pass a
keyword argument to the decorator::
@run_on_executor(executor='_thread_pool')
def foo(self):
pass
This decorator should not be confused with the similarly-named
`.IOLoop.run_in_executor`. In general, using ``run_in_executor``
when *calling* a blocking method is recommended instead of using
this decorator when *defining* a method. If compatibility with older
versions of Tornado is required, consider defining an executor
and using ``executor.submit()`` at the call site.
.. versionchanged:: 4.2
Added keyword arguments to use alternative attributes.
.. versionchanged:: 5.0
Always uses the current IOLoop instead of ``self.io_loop``.
.. versionchanged:: 5.1
Returns a `.Future` compatible with ``await`` instead of a
`concurrent.futures.Future`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in
6.0. The decorator itself is discouraged in new code but will
not be removed in 6.0.
.. versionchanged:: 6.0
The ``callback`` argument was removed.
"""
# Fully type-checking decorators is tricky, and this one is
# discouraged anyway so it doesn't have all the generic magic.
def run_on_executor_decorator(fn: Callable) -> Callable[..., Future]:
executor = kwargs.get("executor", "executor")
@functools.wraps(fn)
def wrapper(self: Any, *args: Any, **kwargs: Any) -> Future:
async_future = Future() # type: Future
conc_future = getattr(self, executor).submit(fn, self, *args, **kwargs)
chain_future(conc_future, async_future)
return async_future
return wrapper
if args and kwargs:
raise ValueError("cannot combine positional and keyword args")
if len(args) == 1:
return run_on_executor_decorator(args[0])
elif len(args) != 0:
raise ValueError("expected 1 argument, got %d", len(args))
return run_on_executor_decorator | Decorator to run a synchronous method asynchronously on an executor.
The decorated method may be called with a ``callback`` keyword
argument and returns a future.
The executor to be used is determined by the ``executor``
attributes of ``self``. To use a different attribute name, pass a
keyword argument to the decorator::
@run_on_executor(executor='_thread_pool')
def foo(self):
pass
This decorator should not be confused with the similarly-named
`.IOLoop.run_in_executor`. In general, using ``run_in_executor``
when *calling* a blocking method is recommended instead of using
this decorator when *defining* a method. If compatibility with older
versions of Tornado is required, consider defining an executor
and using ``executor.submit()`` at the call site.
.. versionchanged:: 4.2
Added keyword arguments to use alternative attributes.
.. versionchanged:: 5.0
Always uses the current IOLoop instead of ``self.io_loop``.
.. versionchanged:: 5.1
Returns a `.Future` compatible with ``await`` instead of a
`concurrent.futures.Future`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in
6.0. The decorator itself is discouraged in new code but will
not be removed in 6.0.
.. versionchanged:: 6.0
        The ``callback`` argument was removed. | Below is the instruction that describes the task:
### Input:
Decorator to run a synchronous method asynchronously on an executor.
The decorated method may be called with a ``callback`` keyword
argument and returns a future.
The executor to be used is determined by the ``executor``
attributes of ``self``. To use a different attribute name, pass a
keyword argument to the decorator::
@run_on_executor(executor='_thread_pool')
def foo(self):
pass
This decorator should not be confused with the similarly-named
`.IOLoop.run_in_executor`. In general, using ``run_in_executor``
when *calling* a blocking method is recommended instead of using
this decorator when *defining* a method. If compatibility with older
versions of Tornado is required, consider defining an executor
and using ``executor.submit()`` at the call site.
.. versionchanged:: 4.2
Added keyword arguments to use alternative attributes.
.. versionchanged:: 5.0
Always uses the current IOLoop instead of ``self.io_loop``.
.. versionchanged:: 5.1
Returns a `.Future` compatible with ``await`` instead of a
`concurrent.futures.Future`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in
6.0. The decorator itself is discouraged in new code but will
not be removed in 6.0.
.. versionchanged:: 6.0
The ``callback`` argument was removed.
### Response:
def run_on_executor(*args: Any, **kwargs: Any) -> Callable:
"""Decorator to run a synchronous method asynchronously on an executor.
The decorated method may be called with a ``callback`` keyword
argument and returns a future.
The executor to be used is determined by the ``executor``
attributes of ``self``. To use a different attribute name, pass a
keyword argument to the decorator::
@run_on_executor(executor='_thread_pool')
def foo(self):
pass
This decorator should not be confused with the similarly-named
`.IOLoop.run_in_executor`. In general, using ``run_in_executor``
when *calling* a blocking method is recommended instead of using
this decorator when *defining* a method. If compatibility with older
versions of Tornado is required, consider defining an executor
and using ``executor.submit()`` at the call site.
.. versionchanged:: 4.2
Added keyword arguments to use alternative attributes.
.. versionchanged:: 5.0
Always uses the current IOLoop instead of ``self.io_loop``.
.. versionchanged:: 5.1
Returns a `.Future` compatible with ``await`` instead of a
`concurrent.futures.Future`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in
6.0. The decorator itself is discouraged in new code but will
not be removed in 6.0.
.. versionchanged:: 6.0
The ``callback`` argument was removed.
"""
# Fully type-checking decorators is tricky, and this one is
# discouraged anyway so it doesn't have all the generic magic.
def run_on_executor_decorator(fn: Callable) -> Callable[..., Future]:
executor = kwargs.get("executor", "executor")
@functools.wraps(fn)
def wrapper(self: Any, *args: Any, **kwargs: Any) -> Future:
async_future = Future() # type: Future
conc_future = getattr(self, executor).submit(fn, self, *args, **kwargs)
chain_future(conc_future, async_future)
return async_future
return wrapper
if args and kwargs:
raise ValueError("cannot combine positional and keyword args")
if len(args) == 1:
return run_on_executor_decorator(args[0])
elif len(args) != 0:
raise ValueError("expected 1 argument, got %d", len(args))
return run_on_executor_decorator |
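
A minimal usage sketch, assuming Tornado 5.1+ is installed and that the class exposes an executor attribute as the decorator expects:

import time
from concurrent.futures import ThreadPoolExecutor
from tornado import ioloop
from tornado.concurrent import run_on_executor

class Worker:
    def __init__(self):
        self.executor = ThreadPoolExecutor(max_workers=2)

    @run_on_executor
    def slow_square(self, x):
        time.sleep(0.1)   # stands in for blocking work
        return x * x

async def main():
    print(await Worker().slow_square(7))  # 49

ioloop.IOLoop.current().run_sync(main)
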
def p_expr_div_expr(p):
""" expr : expr BAND expr
| expr BOR expr
| expr BXOR expr
| expr PLUS expr
| expr MINUS expr
| expr MUL expr
| expr DIV expr
| expr MOD expr
| expr POW expr
| expr LSHIFT expr
| expr RSHIFT expr
| pexpr BAND expr
| pexpr BOR expr
| pexpr BXOR expr
| pexpr PLUS expr
| pexpr MINUS expr
| pexpr MUL expr
| pexpr DIV expr
| pexpr MOD expr
| pexpr POW expr
| pexpr LSHIFT expr
| pexpr RSHIFT expr
| expr BAND pexpr
| expr BOR pexpr
| expr BXOR pexpr
| expr PLUS pexpr
| expr MINUS pexpr
| expr MUL pexpr
| expr DIV pexpr
| expr MOD pexpr
| expr POW pexpr
| expr LSHIFT pexpr
| expr RSHIFT pexpr
| pexpr BAND pexpr
| pexpr BOR pexpr
| pexpr BXOR pexpr
| pexpr PLUS pexpr
| pexpr MINUS pexpr
| pexpr MUL pexpr
| pexpr DIV pexpr
| pexpr MOD pexpr
| pexpr POW pexpr
| pexpr LSHIFT pexpr
| pexpr RSHIFT pexpr
"""
p[0] = Expr.makenode(Container(p[2], p.lineno(2)), p[1], p[3]) | expr : expr BAND expr
| expr BOR expr
| expr BXOR expr
| expr PLUS expr
| expr MINUS expr
| expr MUL expr
| expr DIV expr
| expr MOD expr
| expr POW expr
| expr LSHIFT expr
| expr RSHIFT expr
| pexpr BAND expr
| pexpr BOR expr
| pexpr BXOR expr
| pexpr PLUS expr
| pexpr MINUS expr
| pexpr MUL expr
| pexpr DIV expr
| pexpr MOD expr
| pexpr POW expr
| pexpr LSHIFT expr
| pexpr RSHIFT expr
| expr BAND pexpr
| expr BOR pexpr
| expr BXOR pexpr
| expr PLUS pexpr
| expr MINUS pexpr
| expr MUL pexpr
| expr DIV pexpr
| expr MOD pexpr
| expr POW pexpr
| expr LSHIFT pexpr
| expr RSHIFT pexpr
| pexpr BAND pexpr
| pexpr BOR pexpr
| pexpr BXOR pexpr
| pexpr PLUS pexpr
| pexpr MINUS pexpr
| pexpr MUL pexpr
| pexpr DIV pexpr
| pexpr MOD pexpr
| pexpr POW pexpr
| pexpr LSHIFT pexpr
             | pexpr RSHIFT pexpr | Below is the instruction that describes the task:
### Input:
expr : expr BAND expr
| expr BOR expr
| expr BXOR expr
| expr PLUS expr
| expr MINUS expr
| expr MUL expr
| expr DIV expr
| expr MOD expr
| expr POW expr
| expr LSHIFT expr
| expr RSHIFT expr
| pexpr BAND expr
| pexpr BOR expr
| pexpr BXOR expr
| pexpr PLUS expr
| pexpr MINUS expr
| pexpr MUL expr
| pexpr DIV expr
| pexpr MOD expr
| pexpr POW expr
| pexpr LSHIFT expr
| pexpr RSHIFT expr
| expr BAND pexpr
| expr BOR pexpr
| expr BXOR pexpr
| expr PLUS pexpr
| expr MINUS pexpr
| expr MUL pexpr
| expr DIV pexpr
| expr MOD pexpr
| expr POW pexpr
| expr LSHIFT pexpr
| expr RSHIFT pexpr
| pexpr BAND pexpr
| pexpr BOR pexpr
| pexpr BXOR pexpr
| pexpr PLUS pexpr
| pexpr MINUS pexpr
| pexpr MUL pexpr
| pexpr DIV pexpr
| pexpr MOD pexpr
| pexpr POW pexpr
| pexpr LSHIFT pexpr
| pexpr RSHIFT pexpr
### Response:
def p_expr_div_expr(p):
""" expr : expr BAND expr
| expr BOR expr
| expr BXOR expr
| expr PLUS expr
| expr MINUS expr
| expr MUL expr
| expr DIV expr
| expr MOD expr
| expr POW expr
| expr LSHIFT expr
| expr RSHIFT expr
| pexpr BAND expr
| pexpr BOR expr
| pexpr BXOR expr
| pexpr PLUS expr
| pexpr MINUS expr
| pexpr MUL expr
| pexpr DIV expr
| pexpr MOD expr
| pexpr POW expr
| pexpr LSHIFT expr
| pexpr RSHIFT expr
| expr BAND pexpr
| expr BOR pexpr
| expr BXOR pexpr
| expr PLUS pexpr
| expr MINUS pexpr
| expr MUL pexpr
| expr DIV pexpr
| expr MOD pexpr
| expr POW pexpr
| expr LSHIFT pexpr
| expr RSHIFT pexpr
| pexpr BAND pexpr
| pexpr BOR pexpr
| pexpr BXOR pexpr
| pexpr PLUS pexpr
| pexpr MINUS pexpr
| pexpr MUL pexpr
| pexpr DIV pexpr
| pexpr MOD pexpr
| pexpr POW pexpr
| pexpr LSHIFT pexpr
| pexpr RSHIFT pexpr
"""
p[0] = Expr.makenode(Container(p[2], p.lineno(2)), p[1], p[3]) |
def _get_run_by_other_worker(worker):
"""
    This returns a set of the tasks that are being run by other workers
"""
task_sets = _get_external_workers(worker).values()
return functools.reduce(lambda a, b: a | b, task_sets, set()) | This returns a set of the tasks that are being run by other worker | Below is the the instruction that describes the task:
### Input:
This returns a set of the tasks that are being run by other workers
### Response:
def _get_run_by_other_worker(worker):
"""
    This returns a set of the tasks that are being run by other workers
"""
task_sets = _get_external_workers(worker).values()
return functools.reduce(lambda a, b: a | b, task_sets, set()) |
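
The reduce here is just an n-way set union; a standalone illustration, with set().union(*task_sets) as the equivalent idiom:

import functools

task_sets = [{'task_a', 'task_b'}, {'task_b', 'task_c'}, set()]
running = functools.reduce(lambda a, b: a | b, task_sets, set())
assert running == {'task_a', 'task_b', 'task_c'}
assert running == set().union(*task_sets)  # equivalent spelling
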
def _delete(self, namespace, stream, start_id, end_time, configuration):
"""
Delete events with id > `start_id` and end_time <= `end_time`.
"""
start_id_event = Event(start_id)
end_id_event = Event(uuid_from_kronos_time(end_time,
_type=UUIDType.HIGHEST))
stream_events = self.db[namespace][stream]
# Find the interval our events belong to.
lo = bisect.bisect_left(stream_events, start_id_event)
if lo + 1 > len(stream_events):
return 0, []
if stream_events[lo] == start_id_event:
lo += 1
hi = bisect.bisect_right(stream_events, end_id_event)
del stream_events[lo:hi]
    return max(0, hi - lo), [] | Delete events with id > `start_id` and end_time <= `end_time`. | Below is the instruction that describes the task:
### Input:
Delete events with id > `start_id` and end_time <= `end_time`.
### Response:
def _delete(self, namespace, stream, start_id, end_time, configuration):
"""
Delete events with id > `start_id` and end_time <= `end_time`.
"""
start_id_event = Event(start_id)
end_id_event = Event(uuid_from_kronos_time(end_time,
_type=UUIDType.HIGHEST))
stream_events = self.db[namespace][stream]
# Find the interval our events belong to.
lo = bisect.bisect_left(stream_events, start_id_event)
if lo + 1 > len(stream_events):
return 0, []
if stream_events[lo] == start_id_event:
lo += 1
hi = bisect.bisect_right(stream_events, end_id_event)
del stream_events[lo:hi]
return max(0, hi - lo), [] |
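
The bisect_left/bisect_right pair selects the half-open slice of events strictly after start_id and up to end_id; the asymmetric boundary handling can be checked on plain integers:

import bisect

events = [1, 3, 5, 7, 9]
lo = bisect.bisect_left(events, 3)
if events[lo] == 3:                  # exclude an exact match on the start id
    lo += 1
hi = bisect.bisect_right(events, 7)  # include an exact match on the end id
del events[lo:hi]
print(events)  # [1, 3, 9] -- 5 and 7 deleted
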
def getProjectDescriptor(self, dir):
"""
Detects the .uproject descriptor file for the Unreal project in the specified directory
"""
for project in glob.glob(os.path.join(dir, '*.uproject')):
return os.path.realpath(project)
# No project detected
        raise UnrealManagerException('could not detect an Unreal project in the current directory') | Detects the .uproject descriptor file for the Unreal project in the specified directory | Below is the instruction that describes the task:
### Input:
Detects the .uproject descriptor file for the Unreal project in the specified directory
### Response:
def getProjectDescriptor(self, dir):
"""
Detects the .uproject descriptor file for the Unreal project in the specified directory
"""
for project in glob.glob(os.path.join(dir, '*.uproject')):
return os.path.realpath(project)
# No project detected
raise UnrealManagerException('could not detect an Unreal project in the current directory') |
def add_item(self,item):
"""Add an item to `self`.
:Parameters:
- `item`: the item to add.
:Types:
- `item`: `MucItemBase`
"""
if not isinstance(item,MucItemBase):
raise TypeError("Bad item type for muc#user")
item.as_xml(self.xmlnode) | Add an item to `self`.
:Parameters:
- `item`: the item to add.
:Types:
- `item`: `MucItemBase` | Below is the instruction that describes the task:
### Input:
Add an item to `self`.
:Parameters:
- `item`: the item to add.
:Types:
- `item`: `MucItemBase`
### Response:
def add_item(self,item):
"""Add an item to `self`.
:Parameters:
- `item`: the item to add.
:Types:
- `item`: `MucItemBase`
"""
if not isinstance(item,MucItemBase):
raise TypeError("Bad item type for muc#user")
item.as_xml(self.xmlnode) |
def create_calc_dh_d_shape(estimator):
"""
Return the function that can be used in the various gradient and hessian
calculations to calculate the derivative of the transformation with respect
to the shape parameters.
Parameters
----------
estimator : an instance of the estimation.LogitTypeEstimator class.
Should contain a `rows_to_alts` attribute that is a 2D scipy sparse
matrix that maps the rows of the `design` matrix to the alternatives
available in this dataset.
Returns
-------
Callable.
Will accept a 1D array of systematic utility values, a 1D array of
alternative IDs, (shape parameters if there are any) and miscellaneous
args and kwargs. Should return a 2D array whose elements contain the
    derivative of the transformed utility vector with respect to the vector
of shape parameters. The dimensions of the returned vector should
be `(design.shape[0], num_alternatives)`.
"""
dh_d_shape = estimator.rows_to_alts.copy()
# Create a function that will take in the pre-formed matrix, replace its
# data in-place with the new data, and return the correct dh_dshape on each
# iteration of the minimizer
calc_dh_d_shape = partial(_uneven_transform_deriv_shape,
output_array=dh_d_shape)
return calc_dh_d_shape | Return the function that can be used in the various gradient and hessian
calculations to calculate the derivative of the transformation with respect
to the shape parameters.
Parameters
----------
estimator : an instance of the estimation.LogitTypeEstimator class.
Should contain a `rows_to_alts` attribute that is a 2D scipy sparse
matrix that maps the rows of the `design` matrix to the alternatives
available in this dataset.
Returns
-------
Callable.
Will accept a 1D array of systematic utility values, a 1D array of
alternative IDs, (shape parameters if there are any) and miscellaneous
args and kwargs. Should return a 2D array whose elements contain the
derivative of the transformed utility vector with respect to the vector
of shape parameters. The dimensions of the returned vector should
be `(design.shape[0], num_alternatives)`. | Below is the instruction that describes the task:
### Input:
Return the function that can be used in the various gradient and hessian
calculations to calculate the derivative of the transformation with respect
to the shape parameters.
Parameters
----------
estimator : an instance of the estimation.LogitTypeEstimator class.
Should contain a `rows_to_alts` attribute that is a 2D scipy sparse
matrix that maps the rows of the `design` matrix to the alternatives
available in this dataset.
Returns
-------
Callable.
Will accept a 1D array of systematic utility values, a 1D array of
alternative IDs, (shape parameters if there are any) and miscellaneous
args and kwargs. Should return a 2D array whose elements contain the
derivative of the transformed utility vector with respect to the vector
of shape parameters. The dimensions of the returned vector should
be `(design.shape[0], num_alternatives)`.
### Response:
def create_calc_dh_d_shape(estimator):
"""
Return the function that can be used in the various gradient and hessian
calculations to calculate the derivative of the transformation with respect
to the shape parameters.
Parameters
----------
estimator : an instance of the estimation.LogitTypeEstimator class.
Should contain a `rows_to_alts` attribute that is a 2D scipy sparse
matrix that maps the rows of the `design` matrix to the alternatives
available in this dataset.
Returns
-------
Callable.
Will accept a 1D array of systematic utility values, a 1D array of
alternative IDs, (shape parameters if there are any) and miscellaneous
args and kwargs. Should return a 2D array whose elements contain the
    derivative of the transformed utility vector with respect to the vector
of shape parameters. The dimensions of the returned vector should
be `(design.shape[0], num_alternatives)`.
"""
dh_d_shape = estimator.rows_to_alts.copy()
# Create a function that will take in the pre-formed matrix, replace its
# data in-place with the new data, and return the correct dh_dshape on each
# iteration of the minimizer
calc_dh_d_shape = partial(_uneven_transform_deriv_shape,
output_array=dh_d_shape)
return calc_dh_d_shape |
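
The pattern -- allocating the sparse matrix once and baking it into the callable with functools.partial -- can be illustrated in miniature, with a hypothetical _fill helper standing in for _uneven_transform_deriv_shape:

from functools import partial
import numpy as np

def _fill(values, output_array):
    # hypothetical stand-in: overwrite the pre-allocated array in place
    output_array[:] = values
    return output_array

buf = np.zeros(3)                        # allocated once, reused every call
fill = partial(_fill, output_array=buf)
print(fill(np.array([1.0, 2.0, 3.0])))   # [1. 2. 3.] -- no new allocation
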
def _randomize_subject_list(data_list, random):
"""Randomly permute the voxels of a subject list.
The method shuffles the subject one by one in place according to
the random type. If RandomType.NORANDOM, return the original list.
Parameters
----------
    data_list: list of 2D array in shape [nVoxels, nTRs]
Activity image data list to be shuffled.
random: RandomType
Randomization type.
Returns
-------
None.
"""
if random == RandomType.REPRODUCIBLE:
for i in range(len(data_list)):
_randomize_single_subject(data_list[i], seed=i)
elif random == RandomType.UNREPRODUCIBLE:
for data in data_list:
_randomize_single_subject(data) | Randomly permute the voxels of a subject list.
The method shuffles the subject one by one in place according to
the random type. If RandomType.NORANDOM, return the original list.
Parameters
----------
data_list: list of 2D array in shape [nVoxels, nTRs]
Activity image data list to be shuffled.
random: RandomType
Randomization type.
Returns
-------
None. | Below is the instruction that describes the task:
### Input:
Randomly permute the voxels of a subject list.
The method shuffles the subject one by one in place according to
the random type. If RandomType.NORANDOM, return the original list.
Parameters
----------
data_list: list of 2D array in shape [nVoxels, nTRs]
Activity image data list to be shuffled.
random: RandomType
Randomization type.
Returns
-------
None.
### Response:
def _randomize_subject_list(data_list, random):
"""Randomly permute the voxels of a subject list.
The method shuffles the subject one by one in place according to
the random type. If RandomType.NORANDOM, return the original list.
Parameters
----------
    data_list: list of 2D array in shape [nVoxels, nTRs]
Activity image data list to be shuffled.
random: RandomType
Randomization type.
Returns
-------
None.
"""
if random == RandomType.REPRODUCIBLE:
for i in range(len(data_list)):
_randomize_single_subject(data_list[i], seed=i)
elif random == RandomType.UNREPRODUCIBLE:
for data in data_list:
_randomize_single_subject(data) |
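
_randomize_single_subject is not shown; a plausible sketch, assuming it permutes voxel rows in place and uses the seed for reproducibility:

import numpy as np

def _randomize_single_subject(data, seed=None):
    # hypothetical implementation: shuffle voxel rows in place;
    # a fixed seed makes the permutation reproducible
    np.random.RandomState(seed).shuffle(data)
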
def target_info_from_filename(filename):
"""Transforms /some/path/foo.png into ('/some/path', 'foo.png', 'png')."""
basename = osp.basename(filename)
storedir = osp.dirname(osp.abspath(filename))
target = filename.split(".")[-1]
    return storedir, basename, target | Transforms /some/path/foo.png into ('/some/path', 'foo.png', 'png'). | Below is the instruction that describes the task:
### Input:
Transforms /some/path/foo.png into ('/some/path', 'foo.png', 'png').
### Response:
def target_info_from_filename(filename):
"""Transforms /some/path/foo.png into ('/some/path', 'foo.png', 'png')."""
basename = osp.basename(filename)
storedir = osp.dirname(osp.abspath(filename))
target = filename.split(".")[-1]
return storedir, basename, target |
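
Example round trip, plus the usual caveat that splitting on '.' keeps only the last suffix (os.path.splitext is the stdlib alternative):

print(target_info_from_filename('/some/path/foo.png'))
# ('/some/path', 'foo.png', 'png')
print(target_info_from_filename('archive.tar.gz')[2])  # 'gz', not 'tar.gz'
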
def calculate(self, T, method):
        r'''Method to calculate low-pressure liquid viscosity at temperature
`T` with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate viscosity, [K]
method : str
Name of the method to use
Returns
-------
mu : float
            Viscosity of the liquid at T and a low pressure, [Pa*s]
'''
if method == DUTT_PRASAD:
A, B, C = self.DUTT_PRASAD_coeffs
mu = ViswanathNatarajan3(T, A, B, C, )
elif method == VISWANATH_NATARAJAN_3:
A, B, C = self.VISWANATH_NATARAJAN_3_coeffs
mu = ViswanathNatarajan3(T, A, B, C)
elif method == VISWANATH_NATARAJAN_2:
A, B = self.VISWANATH_NATARAJAN_2_coeffs
mu = ViswanathNatarajan2(T, self.VISWANATH_NATARAJAN_2_coeffs[0], self.VISWANATH_NATARAJAN_2_coeffs[1])
elif method == VISWANATH_NATARAJAN_2E:
C, D = self.VISWANATH_NATARAJAN_2E_coeffs
mu = ViswanathNatarajan2Exponential(T, C, D)
elif method == DIPPR_PERRY_8E:
mu = EQ101(T, *self.Perrys2_313_coeffs)
elif method == COOLPROP:
mu = CoolProp_T_dependent_property(T, self.CASRN, 'V', 'l')
elif method == LETSOU_STIEL:
mu = Letsou_Stiel(T, self.MW, self.Tc, self.Pc, self.omega)
elif method == PRZEDZIECKI_SRIDHAR:
Vml = self.Vml(T) if hasattr(self.Vml, '__call__') else self.Vml
mu = Przedziecki_Sridhar(T, self.Tm, self.Tc, self.Pc, self.Vc, Vml, self.omega, self.MW)
elif method == VDI_PPDS:
A, B, C, D, E = self.VDI_PPDS_coeffs
term = (C - T)/(T-D)
if term < 0:
term1 = -((T - C)/(T-D))**(1/3.)
else:
term1 = term**(1/3.)
term2 = term*term1
mu = E*exp(A*term1 + B*term2)
elif method in self.tabular_data:
mu = self.interpolate(T, method)
return mu | r'''Method to calculate low-pressure liquid viscosity at tempearture
`T` with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate viscosity, [K]
method : str
Name of the method to use
Returns
-------
mu : float
            Viscosity of the liquid at T and a low pressure, [Pa*s] | Below is the instruction that describes the task:
### Input:
r'''Method to calculate low-pressure liquid viscosity at temperature
`T` with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate viscosity, [K]
method : str
Name of the method to use
Returns
-------
mu : float
Viscosity of the liquid at T and a low pressure, [Pa*s]
### Response:
def calculate(self, T, method):
    r'''Method to calculate low-pressure liquid viscosity at temperature
`T` with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate viscosity, [K]
method : str
Name of the method to use
Returns
-------
mu : float
        Viscosity of the liquid at T and a low pressure, [Pa*s]
'''
if method == DUTT_PRASAD:
A, B, C = self.DUTT_PRASAD_coeffs
mu = ViswanathNatarajan3(T, A, B, C, )
elif method == VISWANATH_NATARAJAN_3:
A, B, C = self.VISWANATH_NATARAJAN_3_coeffs
mu = ViswanathNatarajan3(T, A, B, C)
elif method == VISWANATH_NATARAJAN_2:
A, B = self.VISWANATH_NATARAJAN_2_coeffs
mu = ViswanathNatarajan2(T, self.VISWANATH_NATARAJAN_2_coeffs[0], self.VISWANATH_NATARAJAN_2_coeffs[1])
elif method == VISWANATH_NATARAJAN_2E:
C, D = self.VISWANATH_NATARAJAN_2E_coeffs
mu = ViswanathNatarajan2Exponential(T, C, D)
elif method == DIPPR_PERRY_8E:
mu = EQ101(T, *self.Perrys2_313_coeffs)
elif method == COOLPROP:
mu = CoolProp_T_dependent_property(T, self.CASRN, 'V', 'l')
elif method == LETSOU_STIEL:
mu = Letsou_Stiel(T, self.MW, self.Tc, self.Pc, self.omega)
elif method == PRZEDZIECKI_SRIDHAR:
Vml = self.Vml(T) if hasattr(self.Vml, '__call__') else self.Vml
mu = Przedziecki_Sridhar(T, self.Tm, self.Tc, self.Pc, self.Vc, Vml, self.omega, self.MW)
elif method == VDI_PPDS:
A, B, C, D, E = self.VDI_PPDS_coeffs
term = (C - T)/(T-D)
if term < 0:
term1 = -((T - C)/(T-D))**(1/3.)
else:
term1 = term**(1/3.)
term2 = term*term1
mu = E*exp(A*term1 + B*term2)
elif method in self.tabular_data:
mu = self.interpolate(T, method)
return mu |
def lazygo(watchFolder='../abfs/',reAnalyze=False,rebuildSite=False,
keepGoing=True,matching=False):
"""
continuously monitor a folder for new abfs and try to analyze them.
This is intended to watch only one folder, but can run multiple copies.
"""
abfsKnown=[]
while True:
print()
pagesNeeded=[]
for fname in glob.glob(watchFolder+"/*.abf"):
ID=os.path.basename(fname).replace(".abf","")
if not fname in abfsKnown:
if os.path.exists(fname.replace(".abf",".rsv")): #TODO: or something like this
continue
if matching and not matching in fname:
continue
abfsKnown.append(fname)
if os.path.exists(os.path.dirname(fname)+"/swhlab4/"+os.path.basename(fname).replace(".abf","_info.pkl")) and reAnalyze==False:
print("already analyzed",os.path.basename(fname))
if rebuildSite:
pagesNeeded.append(ID)
else:
handleNewABF(fname)
pagesNeeded.append(ID)
if len(pagesNeeded):
print(" -- rebuilding index page")
indexing.genIndex(os.path.dirname(fname),forceIDs=pagesNeeded)
if not keepGoing:
return
for i in range(50):
print('.',end='')
time.sleep(.2) | continuously monitor a folder for new abfs and try to analyze them.
    This is intended to watch only one folder, but can run multiple copies. | Below is the instruction that describes the task:
### Input:
continuously monitor a folder for new abfs and try to analyze them.
This is intended to watch only one folder, but can run multiple copies.
### Response:
def lazygo(watchFolder='../abfs/',reAnalyze=False,rebuildSite=False,
keepGoing=True,matching=False):
"""
continuously monitor a folder for new abfs and try to analyze them.
This is intended to watch only one folder, but can run multiple copies.
"""
abfsKnown=[]
while True:
print()
pagesNeeded=[]
for fname in glob.glob(watchFolder+"/*.abf"):
ID=os.path.basename(fname).replace(".abf","")
if not fname in abfsKnown:
if os.path.exists(fname.replace(".abf",".rsv")): #TODO: or something like this
continue
if matching and not matching in fname:
continue
abfsKnown.append(fname)
if os.path.exists(os.path.dirname(fname)+"/swhlab4/"+os.path.basename(fname).replace(".abf","_info.pkl")) and reAnalyze==False:
print("already analyzed",os.path.basename(fname))
if rebuildSite:
pagesNeeded.append(ID)
else:
handleNewABF(fname)
pagesNeeded.append(ID)
if len(pagesNeeded):
print(" -- rebuilding index page")
indexing.genIndex(os.path.dirname(fname),forceIDs=pagesNeeded)
if not keepGoing:
return
for i in range(50):
print('.',end='')
time.sleep(.2) |
def _get_view_media(self):
"""
Gather view-level media assets
"""
try:
css = self.Media.css
except AttributeError:
css = {}
try:
js = self.Media.js
except AttributeError:
js = []
        return Media(css=css, js=js) | Gather view-level media assets | Below is the instruction that describes the task:
### Input:
Gather view-level media assets
### Response:
def _get_view_media(self):
"""
Gather view-level media assets
"""
try:
css = self.Media.css
except AttributeError:
css = {}
try:
js = self.Media.js
except AttributeError:
js = []
return Media(css=css, js=js) |
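
For context, the try/except pattern reads optional attributes off an inner Media class; a hypothetical view using it (Media here assumed to behave like django.forms.Media):

class ReportView:
    class Media:
        css = {'all': ['report.css']}
        js = ['report.js']

# _get_view_media() would wrap these into Media(css={...}, js=[...]),
# falling back to empty css/js when the inner class omits either attribute
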
def add_callback(self, fn, *args, **kwargs):
"""
Attaches a callback function to be called when the final results arrive.
By default, `fn` will be called with the results as the first and only
argument. If `*args` or `**kwargs` are supplied, they will be passed
through as additional positional or keyword arguments to `fn`.
If an error is hit while executing the operation, a callback attached
here will not be called. Use :meth:`.add_errback()` or :meth:`add_callbacks()`
if you wish to handle that case.
If the final result has already been seen when this method is called,
the callback will be called immediately (before this method returns).
Note: in the case that the result is not available when the callback is added,
the callback is executed by IO event thread. This means that the callback
should not block or attempt further synchronous requests, because no further
IO will be processed until the callback returns.
**Important**: if the callback you attach results in an exception being
raised, **the exception will be ignored**, so please ensure your
callback handles all error cases that you care about.
Usage example::
>>> session = cluster.connect("mykeyspace")
>>> def handle_results(rows, start_time, should_log=False):
... if should_log:
... log.info("Total time: %f", time.time() - start_time)
... ...
>>> future = session.execute_async("SELECT * FROM users")
>>> future.add_callback(handle_results, time.time(), should_log=True)
"""
run_now = False
with self._callback_lock:
# Always add fn to self._callbacks, even when we're about to
# execute it, to prevent races with functions like
# start_fetching_next_page that reset _final_result
self._callbacks.append((fn, args, kwargs))
if self._final_result is not _NOT_SET:
run_now = True
if run_now:
fn(self._final_result, *args, **kwargs)
return self | Attaches a callback function to be called when the final results arrive.
By default, `fn` will be called with the results as the first and only
argument. If `*args` or `**kwargs` are supplied, they will be passed
through as additional positional or keyword arguments to `fn`.
If an error is hit while executing the operation, a callback attached
here will not be called. Use :meth:`.add_errback()` or :meth:`add_callbacks()`
if you wish to handle that case.
If the final result has already been seen when this method is called,
the callback will be called immediately (before this method returns).
Note: in the case that the result is not available when the callback is added,
the callback is executed by IO event thread. This means that the callback
should not block or attempt further synchronous requests, because no further
IO will be processed until the callback returns.
**Important**: if the callback you attach results in an exception being
raised, **the exception will be ignored**, so please ensure your
callback handles all error cases that you care about.
Usage example::
>>> session = cluster.connect("mykeyspace")
>>> def handle_results(rows, start_time, should_log=False):
... if should_log:
... log.info("Total time: %f", time.time() - start_time)
... ...
>>> future = session.execute_async("SELECT * FROM users")
        >>> future.add_callback(handle_results, time.time(), should_log=True) | Below is the instruction that describes the task:
### Input:
Attaches a callback function to be called when the final results arrive.
By default, `fn` will be called with the results as the first and only
argument. If `*args` or `**kwargs` are supplied, they will be passed
through as additional positional or keyword arguments to `fn`.
If an error is hit while executing the operation, a callback attached
here will not be called. Use :meth:`.add_errback()` or :meth:`add_callbacks()`
if you wish to handle that case.
If the final result has already been seen when this method is called,
the callback will be called immediately (before this method returns).
Note: in the case that the result is not available when the callback is added,
the callback is executed by IO event thread. This means that the callback
should not block or attempt further synchronous requests, because no further
IO will be processed until the callback returns.
**Important**: if the callback you attach results in an exception being
raised, **the exception will be ignored**, so please ensure your
callback handles all error cases that you care about.
Usage example::
>>> session = cluster.connect("mykeyspace")
>>> def handle_results(rows, start_time, should_log=False):
... if should_log:
... log.info("Total time: %f", time.time() - start_time)
... ...
>>> future = session.execute_async("SELECT * FROM users")
>>> future.add_callback(handle_results, time.time(), should_log=True)
### Response:
def add_callback(self, fn, *args, **kwargs):
"""
Attaches a callback function to be called when the final results arrive.
By default, `fn` will be called with the results as the first and only
argument. If `*args` or `**kwargs` are supplied, they will be passed
through as additional positional or keyword arguments to `fn`.
If an error is hit while executing the operation, a callback attached
here will not be called. Use :meth:`.add_errback()` or :meth:`add_callbacks()`
if you wish to handle that case.
If the final result has already been seen when this method is called,
the callback will be called immediately (before this method returns).
Note: in the case that the result is not available when the callback is added,
the callback is executed by IO event thread. This means that the callback
should not block or attempt further synchronous requests, because no further
IO will be processed until the callback returns.
**Important**: if the callback you attach results in an exception being
raised, **the exception will be ignored**, so please ensure your
callback handles all error cases that you care about.
Usage example::
>>> session = cluster.connect("mykeyspace")
>>> def handle_results(rows, start_time, should_log=False):
... if should_log:
... log.info("Total time: %f", time.time() - start_time)
... ...
>>> future = session.execute_async("SELECT * FROM users")
>>> future.add_callback(handle_results, time.time(), should_log=True)
"""
run_now = False
with self._callback_lock:
# Always add fn to self._callbacks, even when we're about to
# execute it, to prevent races with functions like
# start_fetching_next_page that reset _final_result
self._callbacks.append((fn, args, kwargs))
if self._final_result is not _NOT_SET:
run_now = True
if run_now:
fn(self._final_result, *args, **kwargs)
return self |
def offset_to_skydir(skydir, offset_lon, offset_lat,
coordsys='CEL', projection='AIT'):
"""Convert a cartesian offset (X,Y) in the given projection into
a SkyCoord."""
offset_lon = np.array(offset_lon, ndmin=1)
offset_lat = np.array(offset_lat, ndmin=1)
w = create_wcs(skydir, coordsys, projection)
return SkyCoord.from_pixel(offset_lon, offset_lat, w, 0) | Convert a cartesian offset (X,Y) in the given projection into
    a SkyCoord. | Below is the instruction that describes the task:
### Input:
Convert a cartesian offset (X,Y) in the given projection into
a SkyCoord.
### Response:
def offset_to_skydir(skydir, offset_lon, offset_lat,
coordsys='CEL', projection='AIT'):
"""Convert a cartesian offset (X,Y) in the given projection into
a SkyCoord."""
offset_lon = np.array(offset_lon, ndmin=1)
offset_lat = np.array(offset_lat, ndmin=1)
w = create_wcs(skydir, coordsys, projection)
return SkyCoord.from_pixel(offset_lon, offset_lat, w, 0) |
def get_content(self):
"""
Returns content for cover page as HTML string. Content will be of type 'str' (Python 2) or 'bytes' (Python 3).
:Returns:
Returns content of this document.
"""
self.content = self.book.get_template('cover')
tree = parse_string(super(EpubCoverHtml, self).get_content())
tree_root = tree.getroot()
images = tree_root.xpath('//xhtml:img', namespaces={'xhtml': NAMESPACES['XHTML']})
images[0].set('src', self.image_name)
images[0].set('alt', self.title)
tree_str = etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True)
return tree_str | Returns content for cover page as HTML string. Content will be of type 'str' (Python 2) or 'bytes' (Python 3).
:Returns:
      Returns content of this document. | Below is the instruction that describes the task:
### Input:
Returns content for cover page as HTML string. Content will be of type 'str' (Python 2) or 'bytes' (Python 3).
:Returns:
Returns content of this document.
### Response:
def get_content(self):
"""
Returns content for cover page as HTML string. Content will be of type 'str' (Python 2) or 'bytes' (Python 3).
:Returns:
Returns content of this document.
"""
self.content = self.book.get_template('cover')
tree = parse_string(super(EpubCoverHtml, self).get_content())
tree_root = tree.getroot()
images = tree_root.xpath('//xhtml:img', namespaces={'xhtml': NAMESPACES['XHTML']})
images[0].set('src', self.image_name)
images[0].set('alt', self.title)
tree_str = etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True)
return tree_str |
def replace(self, key, value):
"""
Convenience method provided as a way to replace a value mapped by a
        key. This is required since a MergingDict always merges via assignment
of item/attribute.
:param key: Attribute name or item key to replace rvalue for.
:type key: object
:param value: The new value to assign.
:type value: object
:return:
"""
super(MergingDict, self).__setitem__(key, value) | Convenience method provided as a way to replace a value mapped by a
        key. This is required since a MergingDict always merges via assignment
of item/attribute.
:param key: Attribute name or item key to replace rvalue for.
:type key: object
:param value: The new value to assign.
:type value: object
        :return: | Below is the instruction that describes the task:
### Input:
Convenience method provided as a way to replace a value mapped by a
key. This is required since a MergingDict always merges via assignment
of item/attribute.
:param key: Attribute name or item key to replace rvalue for.
:type key: object
:param value: The new value to assign.
:type value: object
:return:
### Response:
def replace(self, key, value):
"""
Convenience method provided as a way to replace a value mapped by a
        key. This is required since a MergingDict always merges via assignment
of item/attribute.
:param key: Attribute name or item key to replace rvalue for.
:type key: object
:param value: The new value to assign.
:type value: object
:return:
"""
super(MergingDict, self).__setitem__(key, value) |
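
The merging behaviour that replace() bypasses is implied rather than shown; a minimal sketch of the pair, assuming dict-valued keys merge on assignment:

class MergingDict(dict):
    # minimal sketch: assignment merges dict values instead of overwriting
    def __setitem__(self, key, value):
        if key in self and isinstance(self[key], dict) and isinstance(value, dict):
            self[key].update(value)
        else:
            super().__setitem__(key, value)

    def replace(self, key, value):
        super().__setitem__(key, value)   # plain overwrite, no merging

d = MergingDict(cfg={'a': 1})
d['cfg'] = {'b': 2}          # merges -> {'a': 1, 'b': 2}
d.replace('cfg', {'b': 2})   # overwrites -> {'b': 2}
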
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self.set_base_prompt()
cmd = self.RETURN + "rows 0" + self.RETURN
        self.disable_paging(command=cmd) | Prepare the session after the connection has been established. | Below is the instruction that describes the task:
### Input:
Prepare the session after the connection has been established.
### Response:
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self.set_base_prompt()
cmd = self.RETURN + "rows 0" + self.RETURN
self.disable_paging(command=cmd) |
def generate_data_type(self, data_type):
"""Output a data type definition (a struct or union)."""
if isinstance(data_type, Struct):
# Output a struct definition.
self.emit('')
self.emit('struct %s' % data_type.name)
with self.indent():
if data_type.doc is not None:
self.emit(self.format_string(data_type.doc))
for field in data_type.fields:
type_repr = self.format_data_type(field.data_type)
if not field.has_default:
self.emit('%s %s' % (field.name, type_repr))
else:
self.emit('%s %s = %s' %
(field.name, type_repr, self.format_value(field.default)))
if field.doc is not None:
with self.indent():
self.emit(self.format_value(field.doc))
elif isinstance(data_type, Union):
# Output a union definition.
self.emit('')
self.emit('union %s' % data_type.name)
with self.indent():
if data_type.doc is not None:
self.emit(self.format_string(data_type.doc))
for field in data_type.fields:
name = field.name
# Add a star for a catch-all field.
# (There are two ways to recognize these.)
if field.catch_all or field is data_type.catch_all_field:
name += '*'
if isinstance(field.data_type, Void):
self.emit('%s' % (name))
else:
type_repr = self.format_data_type(field.data_type)
self.emit('%s %s' % (name, type_repr))
if field.doc is not None:
with self.indent():
self.emit(self.format_value(field.doc))
else:
# Don't know what this is.
self.emit('')
            self.emit('# ??? %s' % repr(data_type)) | Output a data type definition (a struct or union). | Below is the instruction that describes the task:
### Input:
Output a data type definition (a struct or union).
### Response:
def generate_data_type(self, data_type):
"""Output a data type definition (a struct or union)."""
if isinstance(data_type, Struct):
# Output a struct definition.
self.emit('')
self.emit('struct %s' % data_type.name)
with self.indent():
if data_type.doc is not None:
self.emit(self.format_string(data_type.doc))
for field in data_type.fields:
type_repr = self.format_data_type(field.data_type)
if not field.has_default:
self.emit('%s %s' % (field.name, type_repr))
else:
self.emit('%s %s = %s' %
(field.name, type_repr, self.format_value(field.default)))
if field.doc is not None:
with self.indent():
self.emit(self.format_value(field.doc))
elif isinstance(data_type, Union):
# Output a union definition.
self.emit('')
self.emit('union %s' % data_type.name)
with self.indent():
if data_type.doc is not None:
self.emit(self.format_string(data_type.doc))
for field in data_type.fields:
name = field.name
# Add a star for a catch-all field.
# (There are two ways to recognize these.)
if field.catch_all or field is data_type.catch_all_field:
name += '*'
if isinstance(field.data_type, Void):
self.emit('%s' % (name))
else:
type_repr = self.format_data_type(field.data_type)
self.emit('%s %s' % (name, type_repr))
if field.doc is not None:
with self.indent():
self.emit(self.format_value(field.doc))
else:
# Don't know what this is.
self.emit('')
self.emit('# ??? %s' % repr(data_type)) |
def merge_files(sources, destination):
"""Copy content of multiple files into a single file.
:param list(str) sources: source file names (paths)
:param str destination: destination file name (path)
:return:
"""
with open(destination, 'w') as hout:
for f in sources:
if os.path.exists(f):
with open(f) as hin:
shutil.copyfileobj(hin, hout)
else:
logger.warning('File is missing: {}'.format(f)) | Copy content of multiple files into a single file.
:param list(str) sources: source file names (paths)
:param str destination: destination file name (path)
    :return: | Below is the instruction that describes the task:
### Input:
Copy content of multiple files into a single file.
:param list(str) sources: source file names (paths)
:param str destination: destination file name (path)
:return:
### Response:
def merge_files(sources, destination):
"""Copy content of multiple files into a single file.
:param list(str) sources: source file names (paths)
:param str destination: destination file name (path)
:return:
"""
with open(destination, 'w') as hout:
for f in sources:
if os.path.exists(f):
with open(f) as hin:
shutil.copyfileobj(hin, hout)
else:
logger.warning('File is missing: {}'.format(f)) |
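
Usage is a straight concatenation; shutil.copyfileobj streams each file in chunks rather than loading it whole:

merge_files(['part1.log', 'part2.log', 'missing.log'], 'combined.log')
# combined.log now holds part1 + part2; missing.log is logged and skipped
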
async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn) | Execute an arbitrary function outside the event loop using
    a shared Executor. | Below is the instruction that describes the task:
### Input:
Execute an arbitrary function outside the event loop using
a shared Executor.
### Response:
async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn) |
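
A runnable sketch of the same pattern on plain asyncio, with a hypothetical Host supplying the loop and executor attributes the method expects:

import asyncio
import functools
from concurrent.futures import ThreadPoolExecutor

class Host:
    def __init__(self, loop):
        self.loop = loop
        self.executor = ThreadPoolExecutor(max_workers=2)

    async def execute(self, fn, *args, **kwargs):
        call = functools.partial(fn, *args, **kwargs)
        return await self.loop.run_in_executor(self.executor, call)

async def main():
    host = Host(asyncio.get_running_loop())
    print(await host.execute(pow, 2, 10))  # 1024, computed off the event loop

asyncio.run(main())
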
def jarSources(target, source, env, for_signature):
"""Only include sources that are not a manifest file."""
try:
env['JARCHDIR']
except KeyError:
jarchdir_set = False
else:
jarchdir_set = True
jarchdir = env.subst('$JARCHDIR', target=target, source=source)
if jarchdir:
jarchdir = env.fs.Dir(jarchdir)
result = []
for src in source:
contents = src.get_text_contents()
if contents[:16] != "Manifest-Version":
if jarchdir_set:
_chdir = jarchdir
else:
try:
_chdir = src.attributes.java_classdir
except AttributeError:
_chdir = None
if _chdir:
# If we are changing the dir with -C, then sources should
# be relative to that directory.
src = SCons.Subst.Literal(src.get_path(_chdir))
result.append('-C')
result.append(_chdir)
result.append(src)
    return result | Only include sources that are not a manifest file. | Below is the instruction that describes the task:
### Input:
Only include sources that are not a manifest file.
### Response:
def jarSources(target, source, env, for_signature):
"""Only include sources that are not a manifest file."""
try:
env['JARCHDIR']
except KeyError:
jarchdir_set = False
else:
jarchdir_set = True
jarchdir = env.subst('$JARCHDIR', target=target, source=source)
if jarchdir:
jarchdir = env.fs.Dir(jarchdir)
result = []
for src in source:
contents = src.get_text_contents()
if contents[:16] != "Manifest-Version":
if jarchdir_set:
_chdir = jarchdir
else:
try:
_chdir = src.attributes.java_classdir
except AttributeError:
_chdir = None
if _chdir:
# If we are changing the dir with -C, then sources should
# be relative to that directory.
src = SCons.Subst.Literal(src.get_path(_chdir))
result.append('-C')
result.append(_chdir)
result.append(src)
return result |
def insert(self, nodes, pos):
# TODO: check docstring
"""Inserts all nodes from `nodes` list into this route at position `pos`
Parameters
----------
nodes : type
Desc
pos : type
Desc
"""
node_list = []
nodes_demand = 0
for node in [node for node in nodes]:
if node._allocation:
node._allocation.deallocate([node])
node_list.append(node)
node._allocation = self
nodes_demand = nodes_demand + node.demand()
self._nodes = self._nodes[:pos] + node_list + self._nodes[pos:]
self._demand += nodes_demand | Inserts all nodes from `nodes` list into this route at position `pos`
Parameters
----------
nodes : type
Desc
pos : type
        Desc | Below is the instruction that describes the task:
### Input:
Inserts all nodes from `nodes` list into this route at position `pos`
Parameters
----------
nodes : type
Desc
pos : type
Desc
### Response:
def insert(self, nodes, pos):
# TODO: check docstring
"""Inserts all nodes from `nodes` list into this route at position `pos`
Parameters
----------
nodes : type
Desc
pos : type
Desc
"""
node_list = []
nodes_demand = 0
for node in [node for node in nodes]:
if node._allocation:
node._allocation.deallocate([node])
node_list.append(node)
node._allocation = self
nodes_demand = nodes_demand + node.demand()
self._nodes = self._nodes[:pos] + node_list + self._nodes[pos:]
self._demand += nodes_demand |
def stream(self, report, callback=None):
"""Queue data for streaming
Args:
report (IOTileReport): A report object to stream to a client
callback (callable): An optional callback that will be called with
a bool value of True when this report actually gets streamed.
If the client disconnects and the report is dropped instead,
callback will be called with False
"""
conn_id = self._find_connection(self.conn_string)
if isinstance(report, BroadcastReport):
self.adapter.notify_event_nowait(self.conn_string, 'broadcast', report)
elif conn_id is not None:
self.adapter.notify_event_nowait(self.conn_string, 'report', report)
if callback is not None:
callback(isinstance(report, BroadcastReport) or (conn_id is not None)) | Queue data for streaming
Args:
report (IOTileReport): A report object to stream to a client
callback (callable): An optional callback that will be called with
a bool value of True when this report actually gets streamed.
If the client disconnects and the report is dropped instead,
callback will be called with False | Below is the the instruction that describes the task:
### Input:
Queue data for streaming
Args:
report (IOTileReport): A report object to stream to a client
callback (callable): An optional callback that will be called with
a bool value of True when this report actually gets streamed.
If the client disconnects and the report is dropped instead,
callback will be called with False
### Response:
def stream(self, report, callback=None):
"""Queue data for streaming
Args:
report (IOTileReport): A report object to stream to a client
callback (callable): An optional callback that will be called with
a bool value of True when this report actually gets streamed.
If the client disconnects and the report is dropped instead,
callback will be called with False
"""
conn_id = self._find_connection(self.conn_string)
if isinstance(report, BroadcastReport):
self.adapter.notify_event_nowait(self.conn_string, 'broadcast', report)
elif conn_id is not None:
self.adapter.notify_event_nowait(self.conn_string, 'report', report)
if callback is not None:
callback(isinstance(report, BroadcastReport) or (conn_id is not None)) |
def create_port(self, port):
"""Enqueue port create"""
instance_type = self.get_instance_type(port)
if not instance_type:
return
port_type = instance_type + a_const.PORT_SUFFIX
p_res = MechResource(port['id'], port_type, a_const.CREATE)
self.provision_queue.put(p_res) | Enqueue port create | Below is the the instruction that describes the task:
### Input:
Enqueue port create
### Response:
def create_port(self, port):
"""Enqueue port create"""
instance_type = self.get_instance_type(port)
if not instance_type:
return
port_type = instance_type + a_const.PORT_SUFFIX
p_res = MechResource(port['id'], port_type, a_const.CREATE)
self.provision_queue.put(p_res) |
def _buffer_ini(data, sample_rate):
"""
Initializes the buffer with eight 1s intervals
----------
Parameters
----------
data : ndarray
Pre-processed ECG signal samples.
sample_rate : int
Sampling rate at which the acquisition took place.
Returns
-------
rr_buffer : list
Data structure that stores eight samples (in the future this buffer will store the duration
of eight RR intervals instead of the 1 second values defined in initialisation).
spk1 : float
Initial value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named signal peak).
npk1 : int
Initial value of NPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named noise peak).
threshold : float
Initial value of the adaptive threshold level (relevant parameter for the application of
specific criteria during the identification of R peaks).
Sources
-------
https://www.robots.ox.ac.uk/~gari/teaching/cdt/A3/readings/ECG/Pan+Tompkins.pdf
"""
rr_buffer = [1] * 8
spk1 = max(data[sample_rate:2*sample_rate])
npk1 = 0
threshold = _buffer_update(npk1, spk1)
return rr_buffer, spk1, npk1, threshold | Initializes the buffer with eight 1s intervals
----------
Parameters
----------
data : ndarray
Pre-processed ECG signal samples.
sample_rate : int
Sampling rate at which the acquisition took place.
Returns
-------
rr_buffer : list
Data structure that stores eight samples (in the future this buffer will store the duration
of eight RR intervals instead of the 1 second values defined in initialisation).
spk1 : float
Initial value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named signal peak).
npk1 : int
Initial value of NPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named noise peak).
threshold : float
Initial value of the adaptive threshold level (relevant parameter for the application of
specific criteria during the identification of R peaks).
Sources
-------
https://www.robots.ox.ac.uk/~gari/teaching/cdt/A3/readings/ECG/Pan+Tompkins.pdf | Below is the the instruction that describes the task:
### Input:
Initializes the buffer with eight 1s intervals
----------
Parameters
----------
data : ndarray
Pre-processed ECG signal samples.
sample_rate : int
Sampling rate at which the acquisition took place.
Returns
-------
rr_buffer : list
Data structure that stores eight samples (in the future this buffer will store the duration
of eight RR intervals instead of the 1 second values defined in initialisation).
spk1 : float
Initial value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named signal peak).
npk1 : int
Initial value of NPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named noise peak).
threshold : float
Initial value of the adaptive threshold level (relevant parameter for the application of
specific criteria during the identification of R peaks).
Sources
-------
https://www.robots.ox.ac.uk/~gari/teaching/cdt/A3/readings/ECG/Pan+Tompkins.pdf
### Response:
def _buffer_ini(data, sample_rate):
"""
Initializes the buffer with eight 1s intervals
----------
Parameters
----------
data : ndarray
Pre-processed ECG signal samples.
sample_rate : int
Sampling rate at which the acquisition took place.
Returns
-------
rr_buffer : list
Data structure that stores eight samples (in the future this buffer will store the duration
of eight RR intervals instead of the 1 second values defined in initialisation).
spk1 : float
Initial value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named signal peak).
npk1 : int
Initial value of NPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named noise peak).
threshold : float
Initial value of the adaptive threshold level (relevant parameter for the application of
specific criteria during the identification of R peaks).
Sources
-------
https://www.robots.ox.ac.uk/~gari/teaching/cdt/A3/readings/ECG/Pan+Tompkins.pdf
"""
rr_buffer = [1] * 8
spk1 = max(data[sample_rate:2*sample_rate])
npk1 = 0
threshold = _buffer_update(npk1, spk1)
return rr_buffer, spk1, npk1, threshold |
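A usage sketch, assuming _buffer_ini above is in scope and stubbing its _buffer_update helper with the classic Pan-Tompkins rule (the stub and the synthetic signal are assumptions for the demo):

import numpy as np

def _buffer_update(npk1, spk1):
    # Pan-Tompkins threshold rule: THRESHOLD = NPK + 0.25 * (SPK - NPK).
    return npk1 + 0.25 * (spk1 - npk1)

fs = 250  # assumed sampling rate in Hz
signal = np.abs(np.random.randn(5 * fs))  # stand-in for a pre-processed ECG
rr_buffer, spk1, npk1, threshold = _buffer_ini(signal, fs)
print(rr_buffer, spk1, npk1, threshold)  # eight 1s, the peak value, 0, 0.25 * peak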
def concretize(self, memory, addr):
"""
Concretizes the address into a list of values.
If this strategy cannot handle this address, returns None.
"""
if self._filter is None or self._filter(memory, addr):
return self._concretize(memory, addr) | Concretizes the address into a list of values.
If this strategy cannot handle this address, returns None. | Below is the the instruction that describes the task:
### Input:
Concretizes the address into a list of values.
If this strategy cannot handle this address, returns None.
### Response:
def concretize(self, memory, addr):
"""
Concretizes the address into a list of values.
If this strategy cannot handle this address, returns None.
"""
if self._filter is None or self._filter(memory, addr):
return self._concretize(memory, addr) |
def prepare(self):
"""Prepare the incoming request, checking to see the request is sending
JSON content in the request body. If so, the content is decoded and
assigned to the json_arguments attribute.
"""
super(RequestHandler, self).prepare()
if self.request.headers.get('content-type', '').startswith(self.JSON):
self.request.body = escape.json_decode(self.request.body) | Prepare the incoming request, checking to see if the request is
sending JSON content in the request body. If so, the content is decoded
and assigned back to the request body. | Below is the the instruction that describes the task:
### Input:
Prepare the incoming request, checking to see if the request is
sending JSON content in the request body. If so, the content is decoded
and assigned back to the request body.
### Response:
def prepare(self):
"""Prepare the incoming request, checking to see the request is sending
JSON content in the request body. If so, the content is decoded and
assigned to the json_arguments attribute.
"""
super(RequestHandler, self).prepare()
if self.request.headers.get('content-type', '').startswith(self.JSON):
self.request.body = escape.json_decode(self.request.body) |
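A minimal handler built around the same prepare() hook, assuming Tornado; the JSON constant and the post() body here are illustrative, not from the source:

from tornado import escape, web

class JSONRequestHandler(web.RequestHandler):
    JSON = 'application/json'

    def prepare(self):
        super().prepare()
        if self.request.headers.get('content-type', '').startswith(self.JSON):
            # After this, handlers read the parsed dict from request.body.
            self.request.body = escape.json_decode(self.request.body)

    def post(self):
        self.write({'received': self.request.body})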
def compare_container_networks(first, second):
'''
.. versionadded:: 2018.3.0
Returns the differences between two containers' networks. When a network is
only present in one of the two containers, that network's diff will simply be
represented with ``True`` for the side of the diff in which the network is
present and ``False`` for the side of the diff in which the network is
absent.
This function works by comparing the contents of both containers'
``Networks`` keys (under ``NetworkSettings``) in the return data from
:py:func:`docker.inspect_container
<salt.modules.dockermod.inspect_container>`. Because each network contains
some items that are either A) only set at runtime, B) naturally varying from
container to container, or both, by default the following keys in each
network are examined:
- **Aliases**
- **Links**
- **IPAMConfig**
The exception to this is if ``IPAMConfig`` is unset (i.e. null) in one
container but not the other. This happens when no static IP configuration
is set, and automatic IP configuration is in effect. So, in order to report
on changes between automatic IP configuration in one container and static
IP configuration in another container (as we need to do for the
:py:func:`docker_container.running <salt.states.docker_container.running>`
state), automatic IP configuration will also be checked in these cases.
This function uses the :conf_minion:`docker.compare_container_networks`
minion config option to determine which keys to examine. This provides
flexibility in the event that features added in a future Docker release
necessitate changes to how Salt compares networks. In these cases, rather
than waiting for a new Salt release one can just set
:conf_minion:`docker.compare_container_networks`.
.. note::
The checks for automatic IP configuration described above only apply if
``IPAMConfig`` is among the keys set for static IP checks in
:conf_minion:`docker.compare_container_networks`.
first
Name or ID of first container (old)
second
Name or ID of second container (new)
CLI Example:
.. code-block:: bash
salt myminion docker.compare_container_networks foo bar
'''
def _get_nets(data):
return data.get('NetworkSettings', {}).get('Networks', {})
compare_keys = __opts__['docker.compare_container_networks']
result1 = inspect_container(first) \
if not isinstance(first, dict) \
else first
result2 = inspect_container(second) \
if not isinstance(second, dict) \
else second
nets1 = _get_nets(result1)
nets2 = _get_nets(result2)
state1 = state(first)
state2 = state(second)
# When you attempt and fail to set a static IP (for instance, because the
# IP is not in the network's subnet), Docker will raise an exception but
# will (incorrectly) leave the record for that network in the inspect
# results for the container. Work around this behavior (bug?) by checking
# which containers are actually connected.
all_nets = set(nets1)
all_nets.update(nets2)
for net_name in all_nets:
try:
connected_containers = inspect_network(
net_name).get('Containers', {})
except Exception as exc:
# Shouldn't happen unless a network was removed outside of Salt
# between the time that a docker_container.running state started
# and when this comparison took place.
log.warning(
'Failed to inspect Docker network %s: %s', net_name, exc)
continue
else:
if state1 == 'running' \
and net_name in nets1 \
and result1['Id'] not in connected_containers:
del nets1[net_name]
if state2 == 'running' \
and net_name in nets2 \
and result2['Id'] not in connected_containers:
del nets2[net_name]
ret = {}
def _check_ipconfig(ret, net_name, **kwargs):
# Make some variables to make the logic below easier to understand
nets1_missing = 'old' not in kwargs
if nets1_missing:
nets1_static = False
else:
nets1_static = bool(kwargs['old'])
nets1_autoip = not nets1_static and not nets1_missing
nets2_missing = 'new' not in kwargs
if nets2_missing:
nets2_static = False
else:
nets2_static = bool(kwargs['new'])
nets2_autoip = not nets2_static and not nets2_missing
autoip_keys = compare_keys.get('automatic', [])
if nets1_autoip and (nets2_static or nets2_missing):
for autoip_key in autoip_keys:
autoip_val = nets1[net_name].get(autoip_key)
if autoip_val:
ret.setdefault(net_name, {})[autoip_key] = {
'old': autoip_val, 'new': None
}
if nets2_static:
ret.setdefault(net_name, {})['IPAMConfig'] = {
'old': None, 'new': kwargs['new']
}
if not any(x in ret.get(net_name, {}) for x in autoip_keys):
ret.setdefault(net_name, {})['IPConfiguration'] = {
'old': 'automatic',
'new': 'static' if nets2_static else 'not connected'
}
elif nets2_autoip and (nets1_static or nets1_missing):
for autoip_key in autoip_keys:
autoip_val = nets2[net_name].get(autoip_key)
if autoip_val:
ret.setdefault(net_name, {})[autoip_key] = {
'old': None, 'new': autoip_val
}
if not any(x in ret.get(net_name, {}) for x in autoip_keys):
ret.setdefault(net_name, {})['IPConfiguration'] = {
'old': 'static' if nets1_static else 'not connected',
'new': 'automatic'
}
if nets1_static:
ret.setdefault(net_name, {})['IPAMConfig'] = {
'old': kwargs['old'], 'new': None
}
else:
old_val = kwargs.get('old')
new_val = kwargs.get('new')
if old_val != new_val:
# Static IP configuration present in both containers and there
# are differences, so report them
ret.setdefault(net_name, {})['IPAMConfig'] = {
'old': old_val, 'new': new_val
}
for net_name in (x for x in nets1 if x not in nets2):
# Network is not in the network_settings, but the container is attached
# to the network
for key in compare_keys.get('static', []):
val = nets1[net_name].get(key)
if key == 'IPAMConfig':
_check_ipconfig(ret, net_name, old=val)
if val:
if key == 'Aliases':
try:
val.remove(result1['Config']['Hostname'])
except (ValueError, AttributeError):
pass
else:
if not val:
# The only alias was the default one for the
# hostname
continue
ret.setdefault(net_name, {})[key] = {'old': val, 'new': None}
for net_name in nets2:
if net_name not in nets1:
# Container is not attached to the network, but network is present
# in the network_settings
for key in compare_keys.get('static', []):
val = nets2[net_name].get(key)
if key == 'IPAMConfig':
_check_ipconfig(ret, net_name, new=val)
continue
elif val:
if key == 'Aliases':
try:
val.remove(result2['Config']['Hostname'])
except (ValueError, AttributeError):
pass
else:
if not val:
# The only alias was the default one for the
# hostname
continue
ret.setdefault(net_name, {})[key] = {
'old': None, 'new': val}
else:
for key in compare_keys.get('static', []):
old_val = nets1[net_name][key]
new_val = nets2[net_name][key]
for item in (old_val, new_val):
# Normalize for list order
try:
item.sort()
except AttributeError:
pass
if key == 'Aliases':
# Normalize for hostname alias
try:
old_val.remove(result1['Config']['Hostname'])
except (AttributeError, ValueError):
pass
if not old_val:
old_val = None
try:
new_val.remove(result2['Config']['Hostname'])
except (AttributeError, ValueError):
pass
if not new_val:
new_val = None
elif key == 'IPAMConfig':
_check_ipconfig(ret, net_name, old=old_val, new=new_val)
# We don't need the final check since it's included in the
# _check_ipconfig helper
continue
if bool(old_val) is bool(new_val) is False:
continue
elif old_val != new_val:
ret.setdefault(net_name, {})[key] = {
'old': old_val, 'new': new_val
}
return ret | .. versionadded:: 2018.3.0
Returns the differences between two containers' networks. When a network is
only present in one of the two containers, that network's diff will simply be
represented with ``True`` for the side of the diff in which the network is
present and ``False`` for the side of the diff in which the network is
absent.
This function works by comparing the contents of both containers'
``Networks`` keys (under ``NetworkSettings``) in the return data from
:py:func:`docker.inspect_container
<salt.modules.dockermod.inspect_container>`. Because each network contains
some items that are either A) only set at runtime, B) naturally varying from
container to container, or both, by default the following keys in each
network are examined:
- **Aliases**
- **Links**
- **IPAMConfig**
The exception to this is if ``IPAMConfig`` is unset (i.e. null) in one
container but not the other. This happens when no static IP configuration
is set, and automatic IP configuration is in effect. So, in order to report
on changes between automatic IP configuration in one container and static
IP configuration in another container (as we need to do for the
:py:func:`docker_container.running <salt.states.docker_container.running>`
state), automatic IP configuration will also be checked in these cases.
This function uses the :conf_minion:`docker.compare_container_networks`
minion config option to determine which keys to examine. This provides
flexibility in the event that features added in a future Docker release
necessitate changes to how Salt compares networks. In these cases, rather
than waiting for a new Salt release one can just set
:conf_minion:`docker.compare_container_networks`.
.. note::
The checks for automatic IP configuration described above only apply if
``IPAMConfig`` is among the keys set for static IP checks in
:conf_minion:`docker.compare_container_networks`.
first
Name or ID of first container (old)
second
Name or ID of second container (new)
CLI Example:
.. code-block:: bash
salt myminion docker.compare_container_networks foo bar | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2018.3.0
Returns the differences between two containers' networks. When a network is
only present in one of the two containers, that network's diff will simply be
represented with ``True`` for the side of the diff in which the network is
present and ``False`` for the side of the diff in which the network is
absent.
This function works by comparing the contents of both containers'
``Networks`` keys (under ``NetworkSettings``) in the return data from
:py:func:`docker.inspect_container
<salt.modules.dockermod.inspect_container>`. Because each network contains
some items that are either A) only set at runtime, B) naturally varying from
container to container, or both, by default the following keys in each
network are examined:
- **Aliases**
- **Links**
- **IPAMConfig**
The exception to this is if ``IPAMConfig`` is unset (i.e. null) in one
container but not the other. This happens when no static IP configuration
is set, and automatic IP configuration is in effect. So, in order to report
on changes between automatic IP configuration in one container and static
IP configuration in another container (as we need to do for the
:py:func:`docker_container.running <salt.states.docker_container.running>`
state), automatic IP configuration will also be checked in these cases.
This function uses the :conf_minion:`docker.compare_container_networks`
minion config option to determine which keys to examine. This provides
flexibility in the event that features added in a future Docker release
necessitate changes to how Salt compares networks. In these cases, rather
than waiting for a new Salt release one can just set
:conf_minion:`docker.compare_container_networks`.
.. note::
The checks for automatic IP configuration described above only apply if
``IPAMConfig`` is among the keys set for static IP checks in
:conf_minion:`docker.compare_container_networks`.
first
Name or ID of first container (old)
second
Name or ID of second container (new)
CLI Example:
.. code-block:: bash
salt myminion docker.compare_container_networks foo bar
### Response:
def compare_container_networks(first, second):
'''
.. versionadded:: 2018.3.0
Returns the differences between two containers' networks. When a network is
only present in one of the two containers, that network's diff will simply be
represented with ``True`` for the side of the diff in which the network is
present and ``False`` for the side of the diff in which the network is
absent.
This function works by comparing the contents of both containers'
``Networks`` keys (under ``NetworkSettings``) in the return data from
:py:func:`docker.inspect_container
<salt.modules.dockermod.inspect_container>`. Because each network contains
some items that are either A) only set at runtime, B) naturally varying from
container to container, or both, by default the following keys in each
network are examined:
- **Aliases**
- **Links**
- **IPAMConfig**
The exception to this is if ``IPAMConfig`` is unset (i.e. null) in one
container but not the other. This happens when no static IP configuration
is set, and automatic IP configuration is in effect. So, in order to report
on changes between automatic IP configuration in one container and static
IP configuration in another container (as we need to do for the
:py:func:`docker_container.running <salt.states.docker_container.running>`
state), automatic IP configuration will also be checked in these cases.
This function uses the :conf_minion:`docker.compare_container_networks`
minion config option to determine which keys to examine. This provides
flexibility in the event that features added in a future Docker release
necessitate changes to how Salt compares networks. In these cases, rather
than waiting for a new Salt release one can just set
:conf_minion:`docker.compare_container_networks`.
.. note::
The checks for automatic IP configuration described above only apply if
``IPAMConfig`` is among the keys set for static IP checks in
:conf_minion:`docker.compare_container_networks`.
first
Name or ID of first container (old)
second
Name or ID of second container (new)
CLI Example:
.. code-block:: bash
salt myminion docker.compare_container_networks foo bar
'''
def _get_nets(data):
return data.get('NetworkSettings', {}).get('Networks', {})
compare_keys = __opts__['docker.compare_container_networks']
result1 = inspect_container(first) \
if not isinstance(first, dict) \
else first
result2 = inspect_container(second) \
if not isinstance(second, dict) \
else second
nets1 = _get_nets(result1)
nets2 = _get_nets(result2)
state1 = state(first)
state2 = state(second)
# When you attempt and fail to set a static IP (for instance, because the
# IP is not in the network's subnet), Docker will raise an exception but
# will (incorrectly) leave the record for that network in the inspect
# results for the container. Work around this behavior (bug?) by checking
# which containers are actually connected.
all_nets = set(nets1)
all_nets.update(nets2)
for net_name in all_nets:
try:
connected_containers = inspect_network(
net_name).get('Containers', {})
except Exception as exc:
# Shouldn't happen unless a network was removed outside of Salt
# between the time that a docker_container.running state started
# and when this comparison took place.
log.warning(
'Failed to inspect Docker network %s: %s', net_name, exc)
continue
else:
if state1 == 'running' \
and net_name in nets1 \
and result1['Id'] not in connected_containers:
del nets1[net_name]
if state2 == 'running' \
and net_name in nets2 \
and result2['Id'] not in connected_containers:
del nets2[net_name]
ret = {}
def _check_ipconfig(ret, net_name, **kwargs):
# Make some variables to make the logic below easier to understand
nets1_missing = 'old' not in kwargs
if nets1_missing:
nets1_static = False
else:
nets1_static = bool(kwargs['old'])
nets1_autoip = not nets1_static and not nets1_missing
nets2_missing = 'new' not in kwargs
if nets2_missing:
nets2_static = False
else:
nets2_static = bool(kwargs['new'])
nets2_autoip = not nets2_static and not nets2_missing
autoip_keys = compare_keys.get('automatic', [])
if nets1_autoip and (nets2_static or nets2_missing):
for autoip_key in autoip_keys:
autoip_val = nets1[net_name].get(autoip_key)
if autoip_val:
ret.setdefault(net_name, {})[autoip_key] = {
'old': autoip_val, 'new': None
}
if nets2_static:
ret.setdefault(net_name, {})['IPAMConfig'] = {
'old': None, 'new': kwargs['new']
}
if not any(x in ret.get(net_name, {}) for x in autoip_keys):
ret.setdefault(net_name, {})['IPConfiguration'] = {
'old': 'automatic',
'new': 'static' if nets2_static else 'not connected'
}
elif nets2_autoip and (nets1_static or nets1_missing):
for autoip_key in autoip_keys:
autoip_val = nets2[net_name].get(autoip_key)
if autoip_val:
ret.setdefault(net_name, {})[autoip_key] = {
'old': None, 'new': autoip_val
}
if not any(x in ret.get(net_name, {}) for x in autoip_keys):
ret.setdefault(net_name, {})['IPConfiguration'] = {
'old': 'static' if nets1_static else 'not connected',
'new': 'automatic'
}
if nets1_static:
ret.setdefault(net_name, {})['IPAMConfig'] = {
'old': kwargs['old'], 'new': None
}
else:
old_val = kwargs.get('old')
new_val = kwargs.get('new')
if old_val != new_val:
# Static IP configuration present in both containers and there
# are differences, so report them
ret.setdefault(net_name, {})['IPAMConfig'] = {
'old': old_val, 'new': new_val
}
for net_name in (x for x in nets1 if x not in nets2):
# Network is not in the network_settings, but the container is attached
# to the network
for key in compare_keys.get('static', []):
val = nets1[net_name].get(key)
if key == 'IPAMConfig':
_check_ipconfig(ret, net_name, old=val)
if val:
if key == 'Aliases':
try:
val.remove(result1['Config']['Hostname'])
except (ValueError, AttributeError):
pass
else:
if not val:
# The only alias was the default one for the
# hostname
continue
ret.setdefault(net_name, {})[key] = {'old': val, 'new': None}
for net_name in nets2:
if net_name not in nets1:
# Container is not attached to the network, but network is present
# in the network_settings
for key in compare_keys.get('static', []):
val = nets2[net_name].get(key)
if key == 'IPAMConfig':
_check_ipconfig(ret, net_name, new=val)
continue
elif val:
if key == 'Aliases':
try:
val.remove(result2['Config']['Hostname'])
except (ValueError, AttributeError):
pass
else:
if not val:
# The only alias was the default one for the
# hostname
continue
ret.setdefault(net_name, {})[key] = {
'old': None, 'new': val}
else:
for key in compare_keys.get('static', []):
old_val = nets1[net_name][key]
new_val = nets2[net_name][key]
for item in (old_val, new_val):
# Normalize for list order
try:
item.sort()
except AttributeError:
pass
if key == 'Aliases':
# Normalize for hostname alias
try:
old_val.remove(result1['Config']['Hostname'])
except (AttributeError, ValueError):
pass
if not old_val:
old_val = None
try:
new_val.remove(result2['Config']['Hostname'])
except (AttributeError, ValueError):
pass
if not new_val:
new_val = None
elif key == 'IPAMConfig':
_check_ipconfig(ret, net_name, old=old_val, new=new_val)
# We don't need the final check since it's included in the
# _check_ipconfig helper
continue
if bool(old_val) is bool(new_val) is False:
continue
elif old_val != new_val:
ret.setdefault(net_name, {})[key] = {
'old': old_val, 'new': new_val
}
return ret |
def _make_passage_kwargs(urn, reference):
""" Little helper used by CapitainsCtsPassage here to comply with parents args
:param urn: URN String
:param reference: Reference String
:return: Dictionary of arguments with URN based on identifier and reference
"""
kwargs = {}
if urn is not None:
if reference is not None:
kwargs["urn"] = URN("{}:{}".format(urn.upTo(URN.VERSION), reference))
else:
kwargs["urn"] = urn
return kwargs | Little helper used by CapitainsCtsPassage here to comply with parents' args
:param urn: URN String
:param reference: Reference String
:return: Dictionary of arguments with URN based on identifier and reference | Below is the the instruction that describes the task:
### Input:
Little helper used by CapitainsCtsPassage here to comply with parents' args
:param urn: URN String
:param reference: Reference String
:return: Dictionary of arguments with URN based on identifier and reference
### Response:
def _make_passage_kwargs(urn, reference):
""" Little helper used by CapitainsCtsPassage here to comply with parents args
:param urn: URN String
:param reference: Reference String
:return: Dictionary of arguments with URN based on identifier and reference
"""
kwargs = {}
if urn is not None:
if reference is not None:
kwargs["urn"] = URN("{}:{}".format(urn.upTo(URN.VERSION), reference))
else:
kwargs["urn"] = urn
return kwargs |
def __dump_compose_file(path, compose_result, success_msg, already_existed):
'''
Utility function to dump the compose result to a file.
:param path: path of the docker-compose file to write
:param compose_result: dict holding the generated compose content under 'compose_content'
:param success_msg: the message to give upon success
:param already_existed: whether the compose file already existed on disk
:return: standardized result dict
'''
ret = __dump_docker_compose(path,
compose_result['compose_content'],
already_existed)
if isinstance(ret, dict):
return ret
return __standardize_result(True, success_msg,
compose_result['compose_content'], None) | Utility function to dump the compose result to a file.
:param path: path of the docker-compose file to write
:param compose_result: dict holding the generated compose content under 'compose_content'
:param success_msg: the message to give upon success
:param already_existed: whether the compose file already existed on disk
:return: standardized result dict | Below is the the instruction that describes the task:
### Input:
Utility function to dump the compose result to a file.
:param path: path of the docker-compose file to write
:param compose_result: dict holding the generated compose content under 'compose_content'
:param success_msg: the message to give upon success
:param already_existed: whether the compose file already existed on disk
:return: standardized result dict
### Response:
def __dump_compose_file(path, compose_result, success_msg, already_existed):
'''
Utility function to dump the compose result to a file.
:param path: path of the docker-compose file to write
:param compose_result: dict holding the generated compose content under 'compose_content'
:param success_msg: the message to give upon success
:param already_existed: whether the compose file already existed on disk
:return: standardized result dict
'''
ret = __dump_docker_compose(path,
compose_result['compose_content'],
already_existed)
if isinstance(ret, dict):
return ret
return __standardize_result(True, success_msg,
compose_result['compose_content'], None) |
def returns(*checkers_args):
""" Create a decorator for validating function return values.
Parameters
----------
checkers_args: positional arguments
One or more functions to apply to the output of the decorated function. If a tuple is returned
by the decorated function, multiple functions can be listed and are assumed to match by
position to the elements in the returned tuple.
Examples
--------
@returns(df_checker)
def do_something_with_df(df, *args, **kw):
print(df.head())
return df
@returns(df_checker1, df_checker2)
def do_something_with_dfs(df1, df2, *args, **kw):
# Do something with both dfs
return (df1, df2)
"""
@decorator
def run_checkers(func, *args, **kwargs):
ret = func(*args, **kwargs)
if type(ret) != tuple:
ret = (ret, )
assert len(ret) == len(checkers_args)
if checkers_args:
for idx, checker_function in enumerate(checkers_args):
if callable(checker_function):
result = checker_function(ret[idx])
return ret
return run_checkers | Create a decorator for validating function return values.
Parameters
----------
checkers_args: positional arguments
One or more functions to apply to the output of the decorated function. If a tuple is returned
by the decorated function, multiple functions can be listed and are assumed to match by
position to the elements in the returned tuple.
Examples
--------
@returns(df_checker)
def do_something_with_df(df, *args, **kw):
print(df.head())
return df
@returns(df_checker1, df_checker2)
def do_something_with_dfs(df1, df2, *args, **kw):
# Do something with both dfs
return (df1, df2) | Below is the the instruction that describes the task:
### Input:
Create a decorator for validating function return values.
Parameters
----------
checkers_args: positional arguments
One or more functions to apply to the output of the decorated function. If a tuple is returned
by the decorated function, multiple functions can be listed and are assumed to match by
position to the elements in the returned tuple.
Examples
--------
@returns(df_checker)
def do_something_with_df(df, *args, **kw):
print(df.head())
return df
@returns(df_checker1, df_checker2)
def do_something_with_dfs(df1, df2, *args, **kw):
# Do something with both dfs
return (df1, df2)
### Response:
def returns(*checkers_args):
""" Create a decorator for validating function return values.
Parameters
----------
checkers_args: positional arguments
One or more functions to apply to the output of the decorated function. If a tuple is returned
by the decorated function, multiple functions can be listed and are assumed to match by
position to the elements in the returned tuple.
Examples
--------
@returns(df_checker)
def do_something_with_df(df, *args, **kw):
print(df.head())
return df
@returns(df_checker1, df_checker2)
def do_something_with_dfs(df1, df2, *args, **kw):
# Do something with both dfs
return (df1, df2)
"""
@decorator
def run_checkers(func, *args, **kwargs):
ret = func(*args, **kwargs)
if type(ret) != tuple:
ret = (ret, )
assert len(ret) == len(checkers_args)
if checkers_args:
for idx, checker_function in enumerate(checkers_args):
if callable(checker_function):
result = checker_function(ret[idx])
return ret
return run_checkers |
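A usage sketch of the decorator; the checker signals failure by raising, since run_checkers ignores its return value (checker and function names are invented):

def positive_checker(value):
    assert value > 0, "expected a positive result"

@returns(positive_checker)
def add(a, b):
    return a + b

add(1, 2)       # checker passes; note the wrapper returns the 1-tuple (3,)
# add(-1, -2)   # would raise AssertionError from the checker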
def timeCall(*funcAndArgs, **kwargs):
r"""Return the time (in ms) it takes to call a function (the first
argument) with the remaining arguments and `kwargs`.
Examples:
To find out how long ``func('foo', spam=1)`` takes to execute, do:
``timeCall(func, foo, spam=1)``
"""
func, args = funcAndArgs[0], funcAndArgs[1:]
start = time.time()
func(*args, **kwargs)
return time.time() - start | r"""Return the time (in seconds) it takes to call a function (the first
argument) with the remaining arguments and `kwargs`.
Examples:
To find out how long ``func('foo', spam=1)`` takes to execute, do:
``timeCall(func, 'foo', spam=1)`` | Below is the the instruction that describes the task:
### Input:
r"""Return the time (in ms) it takes to call a function (the first
argument) with the remaining arguments and `kwargs`.
Examples:
To find out how long ``func('foo', spam=1)`` takes to execute, do:
``timeCall(func, foo, spam=1)``
### Response:
def timeCall(*funcAndArgs, **kwargs):
r"""Return the time (in ms) it takes to call a function (the first
argument) with the remaining arguments and `kwargs`.
Examples:
To find out how long ``func('foo', spam=1)`` takes to execute, do:
``timeCall(func, foo, spam=1)``
"""
func, args = funcAndArgs[0], funcAndArgs[1:]
start = time.time()
func(*args, **kwargs)
return time.time() - start |
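A quick usage sketch (the sleepy function is invented for the demo):

import time

def slow(n, delay=0.05):
    time.sleep(delay)
    return n

print(timeCall(slow, 42, delay=0.05))  # roughly 0.05 (seconds)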
def subst_src_suffixes(self, env):
"""
The suffix list may contain construction variable expansions,
so we have to evaluate the individual strings. To avoid doing
this over and over, we memoize the results for each construction
environment.
"""
memo_key = id(env)
try:
memo_dict = self._memo['subst_src_suffixes']
except KeyError:
memo_dict = {}
self._memo['subst_src_suffixes'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
suffixes = [env.subst(x) for x in self.src_suffix]
memo_dict[memo_key] = suffixes
return suffixes | The suffix list may contain construction variable expansions,
so we have to evaluate the individual strings. To avoid doing
this over and over, we memoize the results for each construction
environment. | Below is the the instruction that describes the task:
### Input:
The suffix list may contain construction variable expansions,
so we have to evaluate the individual strings. To avoid doing
this over and over, we memoize the results for each construction
environment.
### Response:
def subst_src_suffixes(self, env):
"""
The suffix list may contain construction variable expansions,
so we have to evaluate the individual strings. To avoid doing
this over and over, we memoize the results for each construction
environment.
"""
memo_key = id(env)
try:
memo_dict = self._memo['subst_src_suffixes']
except KeyError:
memo_dict = {}
self._memo['subst_src_suffixes'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
suffixes = [env.subst(x) for x in self.src_suffix]
memo_dict[memo_key] = suffixes
return suffixes |
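The same per-environment memoization shape, reduced to a self-contained sketch; the toy Env.subst below is a stand-in for SCons variable expansion, not the real API:

class Env(dict):
    def subst(self, s):
        # Tiny stand-in for construction-variable expansion.
        for key, value in self.items():
            s = s.replace('$' + key, value)
        return s

class Suffixes:
    def __init__(self, src_suffix):
        self.src_suffix = src_suffix
        self._memo = {}

    def subst_src_suffixes(self, env):
        memo_dict = self._memo.setdefault('subst_src_suffixes', {})
        memo_key = id(env)
        if memo_key not in memo_dict:
            memo_dict[memo_key] = [env.subst(x) for x in self.src_suffix]
        return memo_dict[memo_key]

env = Env(OBJSUFFIX='.o')
sfx = Suffixes(['$OBJSUFFIX', '.c'])
print(sfx.subst_src_suffixes(env))  # ['.o', '.c'] (a second call hits the memo)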
def mouse_button_callback(self, window, button, action, mods):
"""
Handle mouse button events and forward them to the example
"""
# Offset button index by 1 to make it match the other libraries
button += 1
# Support left and right mouse button for now
if button not in [1, 2]:
return
xpos, ypos = glfw.get_cursor_pos(self.window)
if action == glfw.PRESS:
self.example.mouse_press_event(xpos, ypos, button)
else:
self.example.mouse_release_event(xpos, ypos, button) | Handle mouse button events and forward them to the example | Below is the the instruction that describes the task:
### Input:
Handle mouse button events and forward them to the example
### Response:
def mouse_button_callback(self, window, button, action, mods):
"""
Handle mouse button events and forward them to the example
"""
# Offset button index by 1 to make it match the other libraries
button += 1
# Support left and right mouse button for now
if button not in [1, 2]:
return
xpos, ypos = glfw.get_cursor_pos(self.window)
if action == glfw.PRESS:
self.example.mouse_press_event(xpos, ypos, button)
else:
self.example.mouse_release_event(xpos, ypos, button) |
def wait_for_array(self, wait_for, timeout_ms):
"""Waits for one or more events to happen.
Scriptable version of :py:func:`wait_for` .
in wait_for of type :class:`GuestSessionWaitForFlag`
Specifies what to wait for;
see :py:class:`GuestSessionWaitForFlag` for more information.
in timeout_ms of type int
Timeout (in ms) to wait for the operation to complete.
Pass 0 for an infinite timeout.
return reason of type :class:`GuestSessionWaitResult`
The overall wait result;
see :py:class:`GuestSessionWaitResult` for more information.
"""
if not isinstance(wait_for, list):
raise TypeError("wait_for can only be an instance of type list")
for a in wait_for[:10]:
if not isinstance(a, GuestSessionWaitForFlag):
raise TypeError(
"array can only contain objects of type GuestSessionWaitForFlag")
if not isinstance(timeout_ms, baseinteger):
raise TypeError("timeout_ms can only be an instance of type baseinteger")
reason = self._call("waitForArray",
in_p=[wait_for, timeout_ms])
reason = GuestSessionWaitResult(reason)
return reason | Waits for one or more events to happen.
Scriptable version of :py:func:`wait_for` .
in wait_for of type :class:`GuestSessionWaitForFlag`
Specifies what to wait for;
see :py:class:`GuestSessionWaitForFlag` for more information.
in timeout_ms of type int
Timeout (in ms) to wait for the operation to complete.
Pass 0 for an infinite timeout.
return reason of type :class:`GuestSessionWaitResult`
The overall wait result;
see :py:class:`GuestSessionWaitResult` for more information. | Below is the the instruction that describes the task:
### Input:
Waits for one or more events to happen.
Scriptable version of :py:func:`wait_for` .
in wait_for of type :class:`GuestSessionWaitForFlag`
Specifies what to wait for;
see :py:class:`GuestSessionWaitForFlag` for more information.
in timeout_ms of type int
Timeout (in ms) to wait for the operation to complete.
Pass 0 for an infinite timeout.
return reason of type :class:`GuestSessionWaitResult`
The overall wait result;
see :py:class:`GuestSessionWaitResult` for more information.
### Response:
def wait_for_array(self, wait_for, timeout_ms):
"""Waits for one or more events to happen.
Scriptable version of :py:func:`wait_for` .
in wait_for of type :class:`GuestSessionWaitForFlag`
Specifies what to wait for;
see :py:class:`GuestSessionWaitForFlag` for more information.
in timeout_ms of type int
Timeout (in ms) to wait for the operation to complete.
Pass 0 for an infinite timeout.
return reason of type :class:`GuestSessionWaitResult`
The overall wait result;
see :py:class:`GuestSessionWaitResult` for more information.
"""
if not isinstance(wait_for, list):
raise TypeError("wait_for can only be an instance of type list")
for a in wait_for[:10]:
if not isinstance(a, GuestSessionWaitForFlag):
raise TypeError(
"array can only contain objects of type GuestSessionWaitForFlag")
if not isinstance(timeout_ms, baseinteger):
raise TypeError("timeout_ms can only be an instance of type baseinteger")
reason = self._call("waitForArray",
in_p=[wait_for, timeout_ms])
reason = GuestSessionWaitResult(reason)
return reason |
def SetupAuth(self,
project_id=None,
project_number=None,
service_account_json_file=None):
"""Sets up authentication with Google APIs.
This will use the credentials from service_account_json_file if provided,
falling back to application default credentials.
See https://cloud.google.com/docs/authentication/production.
Args:
project_id: GCP project ID (e.g. myproject). If not provided, will attempt
to retrieve it from the credentials.
project_number: GCP project number (e.g. 72386324623). If not provided,
project_id will be used in its place.
service_account_json_file: JSON file to use for credentials. If not
provided, will default to application default credentials.
Raises:
NoProjectIdError: If the project id cannot be determined.
"""
if service_account_json_file:
self._credentials = (
service_account.Credentials.from_service_account_file(
service_account_json_file, scopes=_CLOUD_PLATFORM_SCOPE))
if not project_id:
with open(service_account_json_file) as f:
project_id = json.load(f).get('project_id')
else:
self._credentials, credentials_project_id = google.auth.default(
scopes=_CLOUD_PLATFORM_SCOPE)
project_id = project_id or credentials_project_id
if not project_id:
raise NoProjectIdError(
'Unable to determine the project id from the API credentials. '
'Please specify the project id using the --project_id flag.')
self._project_id = project_id
self._project_number = project_number or project_id | Sets up authentication with Google APIs.
This will use the credentials from service_account_json_file if provided,
falling back to application default credentials.
See https://cloud.google.com/docs/authentication/production.
Args:
project_id: GCP project ID (e.g. myproject). If not provided, will attempt
to retrieve it from the credentials.
project_number: GCP project number (e.g. 72386324623). If not provided,
project_id will be used in its place.
service_account_json_file: JSON file to use for credentials. If not
provided, will default to application default credentials.
Raises:
NoProjectIdError: If the project id cannot be determined. | Below is the the instruction that describes the task:
### Input:
Sets up authentication with Google APIs.
This will use the credentials from service_account_json_file if provided,
falling back to application default credentials.
See https://cloud.google.com/docs/authentication/production.
Args:
project_id: GCP project ID (e.g. myproject). If not provided, will attempt
to retrieve it from the credentials.
project_number: GCP project number (e.g. 72386324623). If not provided,
project_id will be used in its place.
service_account_json_file: JSON file to use for credentials. If not
provided, will default to application default credentials.
Raises:
NoProjectIdError: If the project id cannot be determined.
### Response:
def SetupAuth(self,
project_id=None,
project_number=None,
service_account_json_file=None):
"""Sets up authentication with Google APIs.
This will use the credentials from service_account_json_file if provided,
falling back to application default credentials.
See https://cloud.google.com/docs/authentication/production.
Args:
project_id: GCP project ID (e.g. myproject). If not provided, will attempt
to retrieve it from the credentials.
project_number: GCP project number (e.g. 72386324623). If not provided,
project_id will be used in its place.
service_account_json_file: JSON file to use for credentials. If not
provided, will default to application default credentials.
Raises:
NoProjectIdError: If the project id cannot be determined.
"""
if service_account_json_file:
self._credentials = (
service_account.Credentials.from_service_account_file(
service_account_json_file, scopes=_CLOUD_PLATFORM_SCOPE))
if not project_id:
with open(service_account_json_file) as f:
project_id = json.load(f).get('project_id')
else:
self._credentials, credentials_project_id = google.auth.default(
scopes=_CLOUD_PLATFORM_SCOPE)
project_id = project_id or credentials_project_id
if not project_id:
raise NoProjectIdError(
'Unable to determine the project id from the API credentials. '
'Please specify the project id using the --project_id flag.')
self._project_id = project_id
self._project_number = project_number or project_id |
def _create_feed_dict(self, data):
"""Create the dictionary of data to feed to tf session during training.
:param data: training/validation set batch
:return: dictionary(self.input_data: data, self.hrand: random_uniform,
self.vrand: random_uniform)
"""
return {
self.input_data: data,
self.hrand: np.random.rand(data.shape[0], self.num_hidden),
self.vrand: np.random.rand(data.shape[0], data.shape[1])
} | Create the dictionary of data to feed to tf session during training.
:param data: training/validation set batch
:return: dictionary(self.input_data: data, self.hrand: random_uniform,
self.vrand: random_uniform) | Below is the the instruction that describes the task:
### Input:
Create the dictionary of data to feed to tf session during training.
:param data: training/validation set batch
:return: dictionary(self.input_data: data, self.hrand: random_uniform,
self.vrand: random_uniform)
### Response:
def _create_feed_dict(self, data):
"""Create the dictionary of data to feed to tf session during training.
:param data: training/validation set batch
:return: dictionary(self.input_data: data, self.hrand: random_uniform,
self.vrand: random_uniform)
"""
return {
self.input_data: data,
self.hrand: np.random.rand(data.shape[0], self.num_hidden),
self.vrand: np.random.rand(data.shape[0], data.shape[1])
} |
def setup_to_argv(setup):
'''
:param dict setup:
A dict previously gotten from process_command_line.
:note: does not handle --file nor --DEBUG.
'''
ret = [get_pydevd_file()]
for handler in ACCEPTED_ARG_HANDLERS:
if handler.arg_name in setup:
handler.to_argv(ret, setup)
return ret | :param dict setup:
A dict previously gotten from process_command_line.
:note: does not handle --file nor --DEBUG. | Below is the the instruction that describes the task:
### Input:
:param dict setup:
A dict previously gotten from process_command_line.
:note: does not handle --file nor --DEBUG.
### Response:
def setup_to_argv(setup):
'''
:param dict setup:
A dict previously gotten from process_command_line.
:note: does not handle --file nor --DEBUG.
'''
ret = [get_pydevd_file()]
for handler in ACCEPTED_ARG_HANDLERS:
if handler.arg_name in setup:
handler.to_argv(ret, setup)
return ret |
def should_see_link_text(self, link_text, link_url):
"""Assert a link with the provided text points to the provided URL."""
elements = ElementSelector(
world.browser,
str('//a[@href="%s"][./text()="%s"]' % (link_url, link_text)),
filter_displayed=True,
)
if not elements:
raise AssertionError("Expected link not found.") | Assert a link with the provided text points to the provided URL. | Below is the the instruction that describes the task:
### Input:
Assert a link with the provided text points to the provided URL.
### Response:
def should_see_link_text(self, link_text, link_url):
"""Assert a link with the provided text points to the provided URL."""
elements = ElementSelector(
world.browser,
str('//a[@href="%s"][./text()="%s"]' % (link_url, link_text)),
filter_displayed=True,
)
if not elements:
raise AssertionError("Expected link not found.") |
def set_redirect(self, url, status=HttpStatusCodes.HTTP_303):
"""Helper method to set a redirect response.
Args:
url (:obj:`str`): URL to redirect to
status (:obj:`str`, optional): Status code of the response
"""
self.set_status(status)
self.set_content('')
self.set_header(HttpResponseHeaders.LOCATION, url) | Helper method to set a redirect response.
Args:
url (:obj:`str`): URL to redirect to
status (:obj:`str`, optional): Status code of the response | Below is the the instruction that describes the task:
### Input:
Helper method to set a redirect response.
Args:
url (:obj:`str`): URL to redirect to
status (:obj:`str`, optional): Status code of the response
### Response:
def set_redirect(self, url, status=HttpStatusCodes.HTTP_303):
"""Helper method to set a redirect response.
Args:
url (:obj:`str`): URL to redirect to
status (:obj:`str`, optional): Status code of the response
"""
self.set_status(status)
self.set_content('')
self.set_header(HttpResponseHeaders.LOCATION, url) |
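A self-contained stand-in showing the helper in action; the literal '303 See Other' and 'Location' replace the HttpStatusCodes/HttpResponseHeaders constants, which is an assumption for the demo:

class Response:
    def __init__(self):
        self.status = None
        self.content = None
        self.headers = {}

    def set_status(self, status):
        self.status = status

    def set_content(self, content):
        self.content = content

    def set_header(self, name, value):
        self.headers[name] = value

    def set_redirect(self, url, status='303 See Other'):
        self.set_status(status)
        self.set_content('')
        self.set_header('Location', url)

r = Response()
r.set_redirect('https://example.com/next')
print(r.status, r.headers)  # 303 See Other {'Location': 'https://example.com/next'}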
def _read_value(path):
"""
Reads value of specified path.
:param path: A valid system path
"""
read_value = 0
if not os.path.exists(path):
# Path will generally only exist on a Raspberry Pi
pass
else:
with open(path) as f:
read_value = int(f.read())
return read_value | Reads value of specified path.
:param path: A valid system path | Below is the the instruction that describes the task:
### Input:
Reads value of specified path.
:param path: A valid system path
### Response:
def _read_value(path):
"""
Reads value of specified path.
:param path: A valid system path
"""
read_value = 0
if not os.path.exists(path):
# Path will generally only exist on a Raspberry Pi
pass
else:
with open(path) as f:
read_value = int(f.read())
return read_value |
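A usage sketch; the sysfs path is a typical Raspberry Pi thermal node and is an assumption here:

millicelsius = _read_value('/sys/class/thermal/thermal_zone0/temp')
print(millicelsius / 1000.0)  # CPU temperature in C, or 0.0 where the path is absent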
def levels_to_accepting_states(self) -> dict:
"""Return a dict from states to level, i.e. the number of steps to reach any accepting state.
level = -1 if the state cannot reach any accepting state"""
res = {accepting_state: 0 for accepting_state in self._accepting_states}
level = 0
# least fixpoint
z_current, z_next = set(), set()
z_next = set(self._accepting_states)
while z_current != z_next:
level += 1
z_current = z_next
z_next = copy(z_current)
for state in self._transition_function:
for action in self._transition_function[state]:
if state in z_current:
continue
next_state = self._transition_function[state][action]
if next_state in z_current:
z_next.add(state)
res[state] = level
break
z_current = z_next
for failure_state in filter(lambda x: x not in z_current, self._states):
res[failure_state] = -1
return res | Return a dict from states to level, i.e. the number of steps to reach any accepting state.
level = -1 if the state cannot reach any accepting state | Below is the the instruction that describes the task:
### Input:
Return a dict from states to level, i.e. the number of steps to reach any accepting state.
level = -1 if the state cannot reach any accepting state
### Response:
def levels_to_accepting_states(self) -> dict:
"""Return a dict from states to level, i.e. the number of steps to reach any accepting state.
level = -1 if the state cannot reach any accepting state"""
res = {accepting_state: 0 for accepting_state in self._accepting_states}
level = 0
# least fixpoint
z_current, z_next = set(), set()
z_next = set(self._accepting_states)
while z_current != z_next:
level += 1
z_current = z_next
z_next = copy(z_current)
for state in self._transition_function:
for action in self._transition_function[state]:
if state in z_current:
continue
next_state = self._transition_function[state][action]
if next_state in z_current:
z_next.add(state)
res[state] = level
break
z_current = z_next
for failure_state in filter(lambda x: x not in z_current, self._states):
res[failure_state] = -1
return res |
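The same fixpoint on a plain transition dict, so the behaviour can be checked without the automaton class (the four-state example is invented):

def levels(transitions, accepting):
    res = {s: 0 for s in accepting}
    level = 0
    z_current, z_next = set(), set(accepting)
    while z_current != z_next:
        level += 1
        z_current = z_next
        z_next = set(z_current)
        for state, actions in transitions.items():
            if state in z_current:
                continue
            if any(nxt in z_current for nxt in actions.values()):
                z_next.add(state)
                res[state] = level
    for state in transitions:
        res.setdefault(state, -1)  # states that can never reach acceptance
    return res

transitions = {
    'a': {'x': 'b'},
    'b': {'x': 'c'},
    'c': {'x': 'c'},
    'd': {'x': 'd'},  # trapped: can never reach 'c'
}
print(levels(transitions, {'c'}))  # {'c': 0, 'b': 1, 'a': 2, 'd': -1}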
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.maplesat:
res = pysolvers.maplesat_add_cl(self.maplesat, clause)
if res == False:
self.status = False
if not no_return:
return res | Add a new clause to solver's internal formula. | Below is the the instruction that describes the task:
### Input:
Add a new clause to solver's internal formula.
### Response:
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.maplesat:
res = pysolvers.maplesat_add_cl(self.maplesat, clause)
if res == False:
self.status = False
if not no_return:
return res |
def get_checkpoint_path(model_path):
"""
Work around TF problems in checkpoint path handling.
Args:
model_path: a user-input path
Returns:
str: the argument that can be passed to NewCheckpointReader
"""
if os.path.basename(model_path) == model_path:
model_path = os.path.join('.', model_path) # avoid #4921 and #6142
if os.path.basename(model_path) == 'checkpoint':
assert tfv1.gfile.Exists(model_path), model_path
model_path = tf.train.latest_checkpoint(os.path.dirname(model_path))
# to be consistent with either v1 or v2
# fix paths if provided a wrong one
new_path = model_path
if '00000-of-00001' in model_path:
new_path = model_path.split('.data')[0]
elif model_path.endswith('.index'):
new_path = model_path.split('.index')[0]
if new_path != model_path:
logger.info(
"Checkpoint path {} is auto-corrected to {}.".format(model_path, new_path))
model_path = new_path
assert tfv1.gfile.Exists(model_path) or tfv1.gfile.Exists(model_path + '.index'), model_path
return model_path | Work around TF problems in checkpoint path handling.
Args:
model_path: a user-input path
Returns:
str: the argument that can be passed to NewCheckpointReader | Below is the the instruction that describes the task:
### Input:
Work around TF problems in checkpoint path handling.
Args:
model_path: a user-input path
Returns:
str: the argument that can be passed to NewCheckpointReader
### Response:
def get_checkpoint_path(model_path):
"""
Work around TF problems in checkpoint path handling.
Args:
model_path: a user-input path
Returns:
str: the argument that can be passed to NewCheckpointReader
"""
if os.path.basename(model_path) == model_path:
model_path = os.path.join('.', model_path) # avoid #4921 and #6142
if os.path.basename(model_path) == 'checkpoint':
assert tfv1.gfile.Exists(model_path), model_path
model_path = tf.train.latest_checkpoint(os.path.dirname(model_path))
# to be consistent with either v1 or v2
# fix paths if provided a wrong one
new_path = model_path
if '00000-of-00001' in model_path:
new_path = model_path.split('.data')[0]
elif model_path.endswith('.index'):
new_path = model_path.split('.index')[0]
if new_path != model_path:
logger.info(
"Checkpoint path {} is auto-corrected to {}.".format(model_path, new_path))
model_path = new_path
assert tfv1.gfile.Exists(model_path) or tfv1.gfile.Exists(model_path + '.index'), model_path
return model_path |
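The filename-normalisation step in isolation, with the gfile existence checks dropped and the paths invented:

for p in ['model.ckpt.data-00000-of-00001', 'model.ckpt.index', 'model.ckpt']:
    fixed = p
    if '00000-of-00001' in p:
        fixed = p.split('.data')[0]
    elif p.endswith('.index'):
        fixed = p.split('.index')[0]
    print(p, '->', fixed)  # all three normalise to model.ckpt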
def _read_from_seg(self, n):
"""Read from current seg.
Args:
n: max number of bytes to read.
Returns:
valid bytes from the current seg. "" if no more is left.
"""
result = self._seg.read(size=n)
if result == "":
return result
offset = self._seg.tell()
if offset > self._seg_valid_length:
extra = offset - self._seg_valid_length
result = result[:-1*extra]
self._offset += len(result)
return result | Read from current seg.
Args:
n: max number of bytes to read.
Returns:
valid bytes from the current seg. "" if no more is left. | Below is the the instruction that describes the task:
### Input:
Read from current seg.
Args:
n: max number of bytes to read.
Returns:
valid bytes from the current seg. "" if no more is left.
### Response:
def _read_from_seg(self, n):
"""Read from current seg.
Args:
n: max number of bytes to read.
Returns:
valid bytes from the current seg. "" if no more is left.
"""
result = self._seg.read(size=n)
if result == "":
return result
offset = self._seg.tell()
if offset > self._seg_valid_length:
extra = offset - self._seg_valid_length
result = result[:-1*extra]
self._offset += len(result)
return result |
def uvw(self, context):
""" Supply UVW antenna coordinates to montblanc """
# Shape (ntime, na, 3)
(lt, ut), (la, ua), (l, u) = context.array_extents(context.name)
# Create empty UVW coordinates
data = np.empty(context.shape, context.dtype)
data[:,:,0] = np.arange(la+1, ua+1) # U = antenna index
data[:,:,1] = 0 # V = 0
data[:,:,2] = 0 # W = 0
return data | Supply UVW antenna coordinates to montblanc | Below is the the instruction that describes the task:
### Input:
Supply UVW antenna coordinates to montblanc
### Response:
def uvw(self, context):
""" Supply UVW antenna coordinates to montblanc """
# Shape (ntime, na, 3)
(lt, ut), (la, ua), (l, u) = context.array_extents(context.name)
# Create empty UVW coordinates
data = np.empty(context.shape, context.dtype)
data[:,:,0] = np.arange(la+1, ua+1) # U = antenna index
data[:,:,1] = 0 # V = 0
data[:,:,2] = 0 # W = 0
return data |
def iso8601_date(s):
"""
Parses an ISO 8601 date string and returns a UTC date object or the string
if the parsing failed.
:param s: ISO 8601-formatted date string (2015-01-25)
:return:
"""
try:
return datetime.datetime.strptime(s, ISO8601_DATE_FORMAT).replace(tzinfo=pytz.utc).date()
except (TypeError, ValueError):
return s | Parses an ISO 8601 date string and returns a UTC date object or the string
if the parsing failed.
:param s: ISO 8601-formatted date string (2015-01-25)
:return: | Below is the instruction that describes the task:
### Input:
Parses an ISO 8601 date string and returns a UTC date object or the string
if the parsing failed.
:param s: ISO 8601-formatted date string (2015-01-25)
:return:
### Response:
def iso8601_date(s):
"""
Parses an ISO 8601 date string and returns a UTC date object or the string
if the parsing failed.
:param s: ISO 8601-formatted date string (2015-01-25)
:return:
"""
try:
return datetime.datetime.strptime(s, ISO8601_DATE_FORMAT).replace(tzinfo=pytz.utc).date()
except (TypeError, ValueError):
return s |
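A runnable sketch of the function above, assuming ISO8601_DATE_FORMAT is '%Y-%m-%d' (consistent with the '2015-01-25' example in the docstring, but not defined in the row itself):

import datetime
import pytz

ISO8601_DATE_FORMAT = '%Y-%m-%d'  # assumed format

def iso8601_date(s):
    try:
        return datetime.datetime.strptime(
            s, ISO8601_DATE_FORMAT).replace(tzinfo=pytz.utc).date()
    except (TypeError, ValueError):
        return s

print(iso8601_date('2015-01-25'))  # 2015-01-25 (a date object)
print(iso8601_date('not a date'))  # returned unchanged
print(iso8601_date(None))          # returned unchanged (TypeError path)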
def _load_script(self, filename: str) -> Script:
"""Load a Lua script.
Read the Lua script file to generate its Script object. If the script
starts with a magic string, add it to the list of scripts requiring an
idempotency token to execute.
"""
with open(path.join(here, 'redis_scripts', filename), mode='rb') as f:
script_data = f.read()
rv = self._r.register_script(script_data)
if script_data.startswith(b'-- idempotency protected script'):
self._idempotency_protected_scripts.append(rv)
return rv | Load a Lua script.
Read the Lua script file to generate its Script object. If the script
starts with a magic string, add it to the list of scripts requiring an
idempotency token to execute. | Below is the instruction that describes the task:
### Input:
Load a Lua script.
Read the Lua script file to generate its Script object. If the script
starts with a magic string, add it to the list of scripts requiring an
idempotency token to execute.
### Response:
def _load_script(self, filename: str) -> Script:
"""Load a Lua script.
Read the Lua script file to generate its Script object. If the script
starts with a magic string, add it to the list of scripts requiring an
idempotency token to execute.
"""
with open(path.join(here, 'redis_scripts', filename), mode='rb') as f:
script_data = f.read()
rv = self._r.register_script(script_data)
if script_data.startswith(b'-- idempotency protected script'):
self._idempotency_protected_scripts.append(rv)
return rv |
def remote_startCommand(self, stepref, stepId, command, args):
"""
This gets invoked by L{buildbot.process.step.RemoteCommand.start}, as
part of various master-side BuildSteps, to start various commands
that actually do the build. I return nothing. Eventually I will call
.commandComplete() to notify the master-side RemoteCommand that I'm
done.
"""
stepId = decode(stepId)
command = decode(command)
args = decode(args)
self.activity()
if self.command:
log.msg("leftover command, dropping it")
self.stopCommand()
try:
factory = registry.getFactory(command)
except KeyError:
raise UnknownCommand(u"unrecognized WorkerCommand '{0}'".format(command))
self.command = factory(self, stepId, args)
log.msg(u" startCommand:{0} [id {1}]".format(command, stepId))
self.remoteStep = stepref
self.remoteStep.notifyOnDisconnect(self.lostRemoteStep)
d = self.command.doStart()
d.addCallback(lambda res: None)
d.addBoth(self.commandComplete)
return None | This gets invoked by L{buildbot.process.step.RemoteCommand.start}, as
part of various master-side BuildSteps, to start various commands
that actually do the build. I return nothing. Eventually I will call
.commandComplete() to notify the master-side RemoteCommand that I'm
done. | Below is the instruction that describes the task:
### Input:
This gets invoked by L{buildbot.process.step.RemoteCommand.start}, as
part of various master-side BuildSteps, to start various commands
that actually do the build. I return nothing. Eventually I will call
.commandComplete() to notify the master-side RemoteCommand that I'm
done.
### Response:
def remote_startCommand(self, stepref, stepId, command, args):
"""
This gets invoked by L{buildbot.process.step.RemoteCommand.start}, as
part of various master-side BuildSteps, to start various commands
that actually do the build. I return nothing. Eventually I will call
.commandComplete() to notify the master-side RemoteCommand that I'm
done.
"""
stepId = decode(stepId)
command = decode(command)
args = decode(args)
self.activity()
if self.command:
log.msg("leftover command, dropping it")
self.stopCommand()
try:
factory = registry.getFactory(command)
except KeyError:
raise UnknownCommand(u"unrecognized WorkerCommand '{0}'".format(command))
self.command = factory(self, stepId, args)
log.msg(u" startCommand:{0} [id {1}]".format(command, stepId))
self.remoteStep = stepref
self.remoteStep.notifyOnDisconnect(self.lostRemoteStep)
d = self.command.doStart()
d.addCallback(lambda res: None)
d.addBoth(self.commandComplete)
return None |
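The try/except around the factory lookup is a common registry pattern: translate a missing key into a domain error that names the offending command. A generic sketch with a hypothetical registry and factory:

REGISTRY = {'shell': lambda step_id, args: ('shell', step_id, args)}

class UnknownCommand(KeyError):
    pass

def get_factory(command):
    # Turn a plain KeyError into an error that carries the command name.
    try:
        return REGISTRY[command]
    except KeyError:
        raise UnknownCommand("unrecognized WorkerCommand '{0}'".format(command))

print(get_factory('shell')('step-1', {'command': 'ls'}))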
def farps(self) -> typing.Iterator['Static']:
"""
Returns: generator over all FARPs in this coalition
"""
for static in self.statics:
if static.static_is_farp:
yield static | Returns: generator over all FARPs in this coalition | Below is the the instruction that describes the task:
### Input:
Returns: generator over all FARPs in this coalition
### Response:
def farps(self) -> typing.Iterator['Static']:
"""
Returns: generator over all FARPs in this coalition
"""
for static in self.statics:
if static.static_is_farp:
yield static |
def maybe_show_tree(walker, ast):
"""
Show the ast based on the showast flag (or file object), writing to the
appropriate stream depending on the type of the flag.
:param show_tree: Flag which determines whether the parse tree is
    written to sys.stdout or not. (It is also possible to pass a file
like object, into which the ast will be written).
:param ast: The ast to show.
"""
if walker.showast:
if hasattr(walker.showast, 'write'):
stream = walker.showast
else:
stream = sys.stdout
if walker.showast == 'Full':
walker.str_with_template(ast)
else:
stream.write(str(ast))
stream.write('\n') | Show the ast based on the showast flag (or file object), writing to the
appropriate stream depending on the type of the flag.
:param show_tree: Flag which determines whether the parse tree is
written to sys.stdout or not. (It is also possible to pass a file
like object, into which the ast will be written).
:param ast: The ast to show. | Below is the instruction that describes the task:
### Input:
Show the ast based on the showast flag (or file object), writing to the
appropriate stream depending on the type of the flag.
:param show_tree: Flag which determines whether the parse tree is
written to sys.stdout or not. (It is also possible to pass a file
like object, into which the ast will be written).
:param ast: The ast to show.
### Response:
def maybe_show_tree(walker, ast):
"""
Show the ast based on the showast flag (or file object), writing to the
appropriate stream depending on the type of the flag.
:param show_tree: Flag which determines whether the parse tree is
    written to sys.stdout or not. (It is also possible to pass a file
like object, into which the ast will be written).
:param ast: The ast to show.
"""
if walker.showast:
if hasattr(walker.showast, 'write'):
stream = walker.showast
else:
stream = sys.stdout
if walker.showast == 'Full':
walker.str_with_template(ast)
else:
stream.write(str(ast))
stream.write('\n') |
def _os_name_factory(settings):
"""Factory for the :r:`software_os setting` default.
"""
# pylint: disable-msg=W0613,W0142
return u"{0} {1} {2}".format(platform.system(), platform.release(),
platform.machine()) | Factory for the :r:`software_os setting` default. | Below is the instruction that describes the task:
### Input:
Factory for the :r:`software_os setting` default.
### Response:
def _os_name_factory(settings):
"""Factory for the :r:`software_os setting` default.
"""
# pylint: disable-msg=W0613,W0142
return u"{0} {1} {2}".format(platform.system(), platform.release(),
platform.machine()) |
def api(feature='conditions', city='Portland', state='OR', key=None):
"""Use the wunderground API to get current conditions instead of scraping
Please be kind and use your own key (they're FREE!):
http://www.wunderground.com/weather/api/d/login.html
References:
http://www.wunderground.com/weather/api/d/terms.html
Examples:
>>> api('hurric', 'Boise', 'ID') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
{u'currenthurricane': ...}}}
>>> features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' +
... 'planner rawtide satellite tide webcams yesterday').split(' ')
>> everything = [api(f, 'Portland') for f in features]
>> js = api('alerts', 'Portland', 'OR')
>> js = api('condit', 'Sacramento', 'CA')
>> js = api('forecast', 'Mobile', 'AL')
>> js = api('10day', 'Fairhope', 'AL')
>> js = api('geo', 'Decatur', 'AL')
>> js = api('hist', 'history', 'AL')
>> js = api('astro')
"""
features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' +
'planner rawtide satellite tide webcams yesterday').split(' ')
feature = util.fuzzy_get(features, feature)
# Please be kind and use your own key (they're FREE!):
# http://www.wunderground.com/weather/api/d/login.html
key = key or env.get('WUNDERGROUND', None, verbosity=-1) or env.get('WUNDERGROUND_KEY', 'c45a86c2fc63f7d0', verbosity=-1)
url = 'http://api.wunderground.com/api/{key}/{feature}/q/{state}/{city}.json'.format(
key=key, feature=feature, state=state, city=city)
return json.load(urllib.urlopen(url)) | Use the wunderground API to get current conditions instead of scraping
Please be kind and use your own key (they're FREE!):
http://www.wunderground.com/weather/api/d/login.html
References:
http://www.wunderground.com/weather/api/d/terms.html
Examples:
>>> api('hurric', 'Boise', 'ID') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
{u'currenthurricane': ...}}}
>>> features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' +
... 'planner rawtide satellite tide webcams yesterday').split(' ')
>> everything = [api(f, 'Portland') for f in features]
>> js = api('alerts', 'Portland', 'OR')
>> js = api('condit', 'Sacramento', 'CA')
>> js = api('forecast', 'Mobile', 'AL')
>> js = api('10day', 'Fairhope', 'AL')
>> js = api('geo', 'Decatur', 'AL')
>> js = api('hist', 'history', 'AL')
>> js = api('astro') | Below is the instruction that describes the task:
### Input:
Use the wunderground API to get current conditions instead of scraping
Please be kind and use your own key (they're FREE!):
http://www.wunderground.com/weather/api/d/login.html
References:
http://www.wunderground.com/weather/api/d/terms.html
Examples:
>>> api('hurric', 'Boise', 'ID') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
{u'currenthurricane': ...}}}
>>> features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' +
... 'planner rawtide satellite tide webcams yesterday').split(' ')
>> everything = [api(f, 'Portland') for f in features]
>> js = api('alerts', 'Portland', 'OR')
>> js = api('condit', 'Sacramento', 'CA')
>> js = api('forecast', 'Mobile', 'AL')
>> js = api('10day', 'Fairhope', 'AL')
>> js = api('geo', 'Decatur', 'AL')
>> js = api('hist', 'history', 'AL')
>> js = api('astro')
### Response:
def api(feature='conditions', city='Portland', state='OR', key=None):
"""Use the wunderground API to get current conditions instead of scraping
Please be kind and use your own key (they're FREE!):
http://www.wunderground.com/weather/api/d/login.html
References:
http://www.wunderground.com/weather/api/d/terms.html
Examples:
>>> api('hurric', 'Boise', 'ID') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
{u'currenthurricane': ...}}}
>>> features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' +
... 'planner rawtide satellite tide webcams yesterday').split(' ')
>> everything = [api(f, 'Portland') for f in features]
>> js = api('alerts', 'Portland', 'OR')
>> js = api('condit', 'Sacramento', 'CA')
>> js = api('forecast', 'Mobile', 'AL')
>> js = api('10day', 'Fairhope', 'AL')
>> js = api('geo', 'Decatur', 'AL')
>> js = api('hist', 'history', 'AL')
>> js = api('astro')
"""
features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' +
'planner rawtide satellite tide webcams yesterday').split(' ')
feature = util.fuzzy_get(features, feature)
# Please be kind and use your own key (they're FREE!):
# http://www.wunderground.com/weather/api/d/login.html
key = key or env.get('WUNDERGROUND', None, verbosity=-1) or env.get('WUNDERGROUND_KEY', 'c45a86c2fc63f7d0', verbosity=-1)
url = 'http://api.wunderground.com/api/{key}/{feature}/q/{state}/{city}.json'.format(
key=key, feature=feature, state=state, city=city)
return json.load(urllib.urlopen(url)) |
def plan_first_phase(N1, N2):
"""
Create a plan for the first stage of the pruned FFT operation.
(Alex to provide a write up with more details.)
Parameters
-----------
N1 : int
Number of rows.
N2 : int
Number of columns.
Returns
--------
plan : FFTWF plan
The plan for performing the first phase FFT.
"""
N = N1*N2
vin = pycbc.types.zeros(N, dtype=numpy.complex64)
vout = pycbc.types.zeros(N, dtype=numpy.complex64)
f = float_lib.fftwf_plan_many_dft
f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_int]
f.restype = ctypes.c_void_p
return f(1, ctypes.byref(ctypes.c_int(N2)), N1,
vin.ptr, None, 1, N2,
vout.ptr, None, 1, N2, FFTW_BACKWARD, FFTW_MEASURE) | Create a plan for the first stage of the pruned FFT operation.
(Alex to provide a write up with more details.)
Parameters
-----------
N1 : int
Number of rows.
N2 : int
Number of columns.
Returns
--------
plan : FFTWF plan
The plan for performing the first phase FFT. | Below is the instruction that describes the task:
### Input:
Create a plan for the first stage of the pruned FFT operation.
(Alex to provide a write up with more details.)
Parameters
-----------
N1 : int
Number of rows.
N2 : int
Number of columns.
Returns
--------
plan : FFTWF plan
The plan for performing the first phase FFT.
### Response:
def plan_first_phase(N1, N2):
"""
Create a plan for the first stage of the pruned FFT operation.
(Alex to provide a write up with more details.)
Parameters
-----------
N1 : int
Number of rows.
N2 : int
Number of columns.
Returns
--------
plan : FFTWF plan
The plan for performing the first phase FFT.
"""
N = N1*N2
vin = pycbc.types.zeros(N, dtype=numpy.complex64)
vout = pycbc.types.zeros(N, dtype=numpy.complex64)
f = float_lib.fftwf_plan_many_dft
f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_int]
f.restype = ctypes.c_void_p
return f(1, ctypes.byref(ctypes.c_int(N2)), N1,
vin.ptr, None, 1, N2,
vout.ptr, None, 1, N2, FFTW_BACKWARD, FFTW_MEASURE) |
def is_birthday(self, dt=None):
"""
        Check if it's the birthday.
Compares the date/month values of the two dates.
:rtype: bool
"""
if dt is None:
dt = self.now(self.tz)
instance = pendulum.instance(dt)
return (self.month, self.day) == (instance.month, instance.day) | Check if it's the birthday.
Compares the date/month values of the two dates.
:rtype: bool | Below is the instruction that describes the task:
### Input:
Check if it's the birthday.
Compares the date/month values of the two dates.
:rtype: bool
### Response:
def is_birthday(self, dt=None):
"""
        Check if it's the birthday.
Compares the date/month values of the two dates.
:rtype: bool
"""
if dt is None:
dt = self.now(self.tz)
instance = pendulum.instance(dt)
return (self.month, self.day) == (instance.month, instance.day) |
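The comparison above reduces to matching (month, day) pairs, so years (and, after the pendulum conversion, timezones) drop out; a plain-datetime illustration with hypothetical dates:

import datetime

birthday = datetime.date(1990, 3, 14)
today = datetime.date(2024, 3, 14)
# Same month and day, different year: it's the birthday.
print((birthday.month, birthday.day) == (today.month, today.day))  # True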
def update_derived_metric(self, id, **kwargs): # noqa: E501
"""Update a specific derived metric definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_derived_metric(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param DerivedMetricDefinition body: Example Body: <pre>{ \"id\": \"1459375928549\", \"name\": \"Query Name\", \"createUserId\": \"user\", \"query\": \"aliasMetric(ts(~sample.cpu.loadavg.1m), \\\"my.new.metric\\\")\", \"minutes\": 5, \"additionalInformation\": \"Additional Info\" }</pre>
:return: ResponseContainerDerivedMetricDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_derived_metric_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_derived_metric_with_http_info(id, **kwargs) # noqa: E501
return data | Update a specific derived metric definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_derived_metric(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param DerivedMetricDefinition body: Example Body: <pre>{ \"id\": \"1459375928549\", \"name\": \"Query Name\", \"createUserId\": \"user\", \"query\": \"aliasMetric(ts(~sample.cpu.loadavg.1m), \\\"my.new.metric\\\")\", \"minutes\": 5, \"additionalInformation\": \"Additional Info\" }</pre>
:return: ResponseContainerDerivedMetricDefinition
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Update a specific derived metric definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_derived_metric(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param DerivedMetricDefinition body: Example Body: <pre>{ \"id\": \"1459375928549\", \"name\": \"Query Name\", \"createUserId\": \"user\", \"query\": \"aliasMetric(ts(~sample.cpu.loadavg.1m), \\\"my.new.metric\\\")\", \"minutes\": 5, \"additionalInformation\": \"Additional Info\" }</pre>
:return: ResponseContainerDerivedMetricDefinition
If the method is called asynchronously,
returns the request thread.
### Response:
def update_derived_metric(self, id, **kwargs): # noqa: E501
"""Update a specific derived metric definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_derived_metric(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param DerivedMetricDefinition body: Example Body: <pre>{ \"id\": \"1459375928549\", \"name\": \"Query Name\", \"createUserId\": \"user\", \"query\": \"aliasMetric(ts(~sample.cpu.loadavg.1m), \\\"my.new.metric\\\")\", \"minutes\": 5, \"additionalInformation\": \"Additional Info\" }</pre>
:return: ResponseContainerDerivedMetricDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_derived_metric_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_derived_metric_with_http_info(id, **kwargs) # noqa: E501
return data |
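The async_req branch above follows a common generated-client pattern: the same worker either runs inline or on a thread, and the caller of the async variant gets a handle to call .get() on later. A minimal standalone sketch with a hypothetical worker:

from multiprocessing.pool import ThreadPool

pool = ThreadPool(1)

def work(x):
    return x * 2

def call(x, async_req=False):
    if async_req:
        return pool.apply_async(work, (x,))  # returns an AsyncResult
    return work(x)

print(call(21))              # 42, computed inline
print(call(21, True).get())  # 42, computed on the pool thread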
def _fix_overlapping_graphics( spansStartingFrom ):
''' Provides a fix for overlapping annotations that are formatted graphically
(underlined or printed in non-default color).
If two graphically formatted annotations overlap, and if one annotation,
say A, ends within another annotation, say B, then ending of graphics of A
    also causes graphics of B to end, and so, the end of A should restart
the graphics of B for a continuous visualisation;
This method modifies ending tags in a way that annotations ending within
other annotations will also contain restarts of the corresponding (super)-
annotations, so that a continuous formatting is ensured.
'''
for startIndex in sorted( spansStartingFrom.keys() ):
for span1 in spansStartingFrom[startIndex]:
            # If the span is not graphic, we have no worries - we can just skip it
if not span1[5]:
continue
# Otherwise: check for other graphic spans that overlap with the given span
span1Start = span1[0]
span1End = span1[1]
for i in range( span1Start, span1End ):
if i in spansStartingFrom:
for span2 in spansStartingFrom[i]:
span2Start = span2[0]
span2End = span2[1]
# If the spans are not the same, and the span2 is graphic
if span2 != span1 and span2[5]:
# if the overlapping graphic span ends before the current span,
# we have to restart the graphic formatting of given span after
# the end of the overlapping span
if span2End <= span1End:
if not span1[6]:
# If span1 is not bracketed, just add it at the end of
# the overlapping span
span2[4] += span1[3]
else:
# If span1 is bracketed, add it at the end of the
# overlapping span without brackets
wb = span1[3].rstrip('[')
span2[4] += wb | Provides a fix for overlapping annotations that are formatted graphically
(underlined or printed in non-default color).
If two graphically formatted annotations overlap, and if one annotation,
say A, ends within another annotation, say B, then ending of graphics of A
    also causes graphics of B to end, and so, the end of A should restart
the graphics of B for a continuous visualisation;
This method modifies ending tags in a way that annotations ending within
other annotations will also contain restarts of the corresponding (super)-
annotations, so that a continuous formatting is ensured. | Below is the instruction that describes the task:
### Input:
Provides a fix for overlapping annotations that are formatted graphically
(underlined or printed in non-default color).
If two graphically formatted annotations overlap, and if one annotation,
say A, ends within another annotation, say B, then ending of graphics of A
    also causes graphics of B to end, and so, the end of A should restart
the graphics of B for a continuous visualisation;
This method modifies ending tags in a way that annotations ending within
other annotations will also contain restarts of the corresponding (super)-
annotations, so that a continuous formatting is ensured.
### Response:
def _fix_overlapping_graphics( spansStartingFrom ):
''' Provides a fix for overlapping annotations that are formatted graphically
(underlined or printed in non-default color).
If two graphically formatted annotations overlap, and if one annotation,
say A, ends within another annotation, say B, then ending of graphics of A
    also causes graphics of B to end, and so, the end of A should restart
the graphics of B for a continuous visualisation;
This method modifies ending tags in a way that annotations ending within
other annotations will also contain restarts of the corresponding (super)-
annotations, so that a continuous formatting is ensured.
'''
for startIndex in sorted( spansStartingFrom.keys() ):
for span1 in spansStartingFrom[startIndex]:
            # If the span is not graphic, we have no worries - we can just skip it
if not span1[5]:
continue
# Otherwise: check for other graphic spans that overlap with the given span
span1Start = span1[0]
span1End = span1[1]
for i in range( span1Start, span1End ):
if i in spansStartingFrom:
for span2 in spansStartingFrom[i]:
span2Start = span2[0]
span2End = span2[1]
# If the spans are not the same, and the span2 is graphic
if span2 != span1 and span2[5]:
# if the overlapping graphic span ends before the current span,
# we have to restart the graphic formatting of given span after
# the end of the overlapping span
if span2End <= span1End:
if not span1[6]:
# If span1 is not bracketed, just add it at the end of
# the overlapping span
span2[4] += span1[3]
else:
# If span1 is bracketed, add it at the end of the
# overlapping span without brackets
wb = span1[3].rstrip('[')
span2[4] += wb |
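In terminal terms, the problem the function above solves looks like this: closing an inner ANSI style resets the outer one too, so the outer start sequence has to be re-emitted after the inner span ends. A tiny illustration (the codes and text are hypothetical, not taken from the function's span format):

RED, BLUE, RESET = '\x1b[31m', '\x1b[34m', '\x1b[0m'
# Outer span 'abcdef' in red, inner span 'cd' in blue: the RESET that ends
# the blue span also ends red, so RED is restarted before 'ef'.
text = RED + 'ab' + BLUE + 'cd' + RESET + RED + 'ef' + RESET
print(repr(text))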
def get_counter(self, name=None):
'''Shortcut for getting a :class:`~statsd.counter.Counter` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Counter) | Shortcut for getting a :class:`~statsd.counter.Counter` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str | Below is the instruction that describes the task:
### Input:
Shortcut for getting a :class:`~statsd.counter.Counter` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
### Response:
def get_counter(self, name=None):
'''Shortcut for getting a :class:`~statsd.counter.Counter` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Counter) |
def evalRanges(self, datetimeString, sourceTime=None):
"""
Evaluate the C{datetimeString} text and determine if
it represents a date or time range.
@type datetimeString: string
@param datetimeString: datetime text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: start datetime, end datetime and the invalid flag
"""
rangeFlag = retFlag = 0
startStr = endStr = ''
s = datetimeString.strip().lower()
if self.ptc.rangeSep in s:
s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
            s = s.replace('  ', ' ')  # collapse the double spaces introduced above
for cre, rflag in [(self.ptc.CRE_TIMERNG1, 1),
(self.ptc.CRE_TIMERNG2, 2),
(self.ptc.CRE_TIMERNG4, 7),
(self.ptc.CRE_TIMERNG3, 3),
(self.ptc.CRE_DATERNG1, 4),
(self.ptc.CRE_DATERNG2, 5),
(self.ptc.CRE_DATERNG3, 6)]:
m = cre.search(s)
if m is not None:
rangeFlag = rflag
break
debug and log.debug('evalRanges: rangeFlag = %s [%s]', rangeFlag, s)
if m is not None:
if (m.group() != s):
# capture remaining string
parseStr = m.group()
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
sourceTime, ctx = self.parse(s, sourceTime,
VERSION_CONTEXT_STYLE)
if not ctx.hasDateOrTime:
sourceTime = None
else:
parseStr = s
if rangeFlag in (1, 2):
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 2
elif rangeFlag in (3, 7):
m = re.search(self.ptc.rangeSep, parseStr)
# capturing the meridian from the end time
if self.ptc.usesMeridian:
ampm = re.search(self.ptc.am[0], parseStr)
# appending the meridian to the start time
if ampm is not None:
startStr = parseStr[:m.start()] + self.ptc.meridian[0]
else:
startStr = parseStr[:m.start()] + self.ptc.meridian[1]
else:
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 2
elif rangeFlag == 4:
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 1
elif rangeFlag == 5:
m = re.search(self.ptc.rangeSep, parseStr)
endStr = parseStr[m.start() + 1:]
# capturing the year from the end date
date = self.ptc.CRE_DATE3.search(endStr)
endYear = date.group('year')
# appending the year to the start date if the start date
# does not have year information and the end date does.
# eg : "Aug 21 - Sep 4, 2007"
if endYear is not None:
startStr = (parseStr[:m.start()]).strip()
date = self.ptc.CRE_DATE3.search(startStr)
startYear = date.group('year')
if startYear is None:
startStr = startStr + ', ' + endYear
else:
startStr = parseStr[:m.start()]
retFlag = 1
elif rangeFlag == 6:
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
# capturing the month from the start date
mth = self.ptc.CRE_DATE3.search(startStr)
mth = mth.group('mthname')
# appending the month name to the end date
endStr = mth + parseStr[(m.start() + 1):]
retFlag = 1
else:
# if range is not found
startDT = endDT = time.localtime()
if retFlag:
startDT, sctx = self.parse(startStr, sourceTime,
VERSION_CONTEXT_STYLE)
endDT, ectx = self.parse(endStr, sourceTime,
VERSION_CONTEXT_STYLE)
if not sctx.hasDateOrTime or not ectx.hasDateOrTime:
retFlag = 0
return startDT, endDT, retFlag | Evaluate the C{datetimeString} text and determine if
it represents a date or time range.
@type datetimeString: string
@param datetimeString: datetime text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: start datetime, end datetime and the invalid flag | Below is the instruction that describes the task:
### Input:
Evaluate the C{datetimeString} text and determine if
it represents a date or time range.
@type datetimeString: string
@param datetimeString: datetime text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: start datetime, end datetime and the invalid flag
### Response:
def evalRanges(self, datetimeString, sourceTime=None):
"""
Evaluate the C{datetimeString} text and determine if
it represents a date or time range.
@type datetimeString: string
@param datetimeString: datetime text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: start datetime, end datetime and the invalid flag
"""
rangeFlag = retFlag = 0
startStr = endStr = ''
s = datetimeString.strip().lower()
if self.ptc.rangeSep in s:
s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
            s = s.replace('  ', ' ')  # collapse the double spaces introduced above
for cre, rflag in [(self.ptc.CRE_TIMERNG1, 1),
(self.ptc.CRE_TIMERNG2, 2),
(self.ptc.CRE_TIMERNG4, 7),
(self.ptc.CRE_TIMERNG3, 3),
(self.ptc.CRE_DATERNG1, 4),
(self.ptc.CRE_DATERNG2, 5),
(self.ptc.CRE_DATERNG3, 6)]:
m = cre.search(s)
if m is not None:
rangeFlag = rflag
break
debug and log.debug('evalRanges: rangeFlag = %s [%s]', rangeFlag, s)
if m is not None:
if (m.group() != s):
# capture remaining string
parseStr = m.group()
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
sourceTime, ctx = self.parse(s, sourceTime,
VERSION_CONTEXT_STYLE)
if not ctx.hasDateOrTime:
sourceTime = None
else:
parseStr = s
if rangeFlag in (1, 2):
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 2
elif rangeFlag in (3, 7):
m = re.search(self.ptc.rangeSep, parseStr)
# capturing the meridian from the end time
if self.ptc.usesMeridian:
ampm = re.search(self.ptc.am[0], parseStr)
# appending the meridian to the start time
if ampm is not None:
startStr = parseStr[:m.start()] + self.ptc.meridian[0]
else:
startStr = parseStr[:m.start()] + self.ptc.meridian[1]
else:
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 2
elif rangeFlag == 4:
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 1
elif rangeFlag == 5:
m = re.search(self.ptc.rangeSep, parseStr)
endStr = parseStr[m.start() + 1:]
# capturing the year from the end date
date = self.ptc.CRE_DATE3.search(endStr)
endYear = date.group('year')
# appending the year to the start date if the start date
# does not have year information and the end date does.
# eg : "Aug 21 - Sep 4, 2007"
if endYear is not None:
startStr = (parseStr[:m.start()]).strip()
date = self.ptc.CRE_DATE3.search(startStr)
startYear = date.group('year')
if startYear is None:
startStr = startStr + ', ' + endYear
else:
startStr = parseStr[:m.start()]
retFlag = 1
elif rangeFlag == 6:
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
# capturing the month from the start date
mth = self.ptc.CRE_DATE3.search(startStr)
mth = mth.group('mthname')
# appending the month name to the end date
endStr = mth + parseStr[(m.start() + 1):]
retFlag = 1
else:
# if range is not found
startDT = endDT = time.localtime()
if retFlag:
startDT, sctx = self.parse(startStr, sourceTime,
VERSION_CONTEXT_STYLE)
endDT, ectx = self.parse(endStr, sourceTime,
VERSION_CONTEXT_STYLE)
if not sctx.hasDateOrTime or not ectx.hasDateOrTime:
retFlag = 0
return startDT, endDT, retFlag |
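The per-rangeFlag branches above all start from the same move: locate the separator and cut the string into start and end parts. A reduced sketch, with '-' standing in for self.ptc.rangeSep (which parsedatetime draws from its locale constants):

import re

rangeSep = '-'  # hypothetical separator
parseStr = '10:00 am - 5:00 pm'
m = re.search(rangeSep, parseStr)
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
print(repr(startStr), repr(endStr))  # '10:00 am ' ' 5:00 pm'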
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate | Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbreviation.
(So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket. | Below is the instruction that describes the task:
### Input:
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbreviation.
(So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
### Response:
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate |
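The getattr fallback above is what lets both spellings resolve to the same ssl constant; a direct check:

import ssl

name = 'REQUIRED'
res = getattr(ssl, name, None)  # there is no plain ssl.REQUIRED ...
if res is None:
    res = getattr(ssl, 'CERT_' + name)  # ... so fall back to the CERT_ prefix
print(res == ssl.CERT_REQUIRED)  # True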
def disconnect_relationship(cls, id, related_collection_name, request_json):
"""
    Disconnect one or more relationships in a collection with cardinality 'Many'.
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-updating-relationships
:return: A response according to the same specification
"""
try:
this_resource = cls.nodes.get(id=id, active=True)
related_collection = getattr(this_resource, related_collection_name)
rsrc_identifier_list = request_json['data']
if not isinstance(rsrc_identifier_list, list):
raise WrongTypeError
for rsrc_identifier in rsrc_identifier_list:
connected_resource = cls.get_class_from_type(rsrc_identifier['type']).nodes.get(
id=rsrc_identifier['id']
)
related_collection.disconnect(connected_resource)
r = make_response('')
r.status_code = http_error_codes.NO_CONTENT
r.headers['Content-Type'] = CONTENT_TYPE
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
except (KeyError, WrongTypeError):
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION])
return r | Disconnect one or more relationships in a collection with cardinality 'Many'.
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-updating-relationships
:return: A response according to the same specification | Below is the instruction that describes the task:
### Input:
Disconnect one or more relationships in a collection with cardinality 'Many'.
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-updating-relationships
:return: A response according to the same specification
### Response:
def disconnect_relationship(cls, id, related_collection_name, request_json):
"""
    Disconnect one or more relationships in a collection with cardinality 'Many'.
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-updating-relationships
:return: A response according to the same specification
"""
try:
this_resource = cls.nodes.get(id=id, active=True)
related_collection = getattr(this_resource, related_collection_name)
rsrc_identifier_list = request_json['data']
if not isinstance(rsrc_identifier_list, list):
raise WrongTypeError
for rsrc_identifier in rsrc_identifier_list:
connected_resource = cls.get_class_from_type(rsrc_identifier['type']).nodes.get(
id=rsrc_identifier['id']
)
related_collection.disconnect(connected_resource)
r = make_response('')
r.status_code = http_error_codes.NO_CONTENT
r.headers['Content-Type'] = CONTENT_TYPE
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
except (KeyError, WrongTypeError):
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION])
return r |
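For reference, the request_json the function expects follows the JSON:API shape cited in the docstring: a 'data' list of resource identifier objects. The type and ids here are hypothetical:

request_json = {
    "data": [
        {"type": "person", "id": "42"},
        {"type": "person", "id": "43"},
    ]
}
# Each identifier names one resource to disconnect.
for ident in request_json["data"]:
    print(ident["type"], ident["id"])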
def from_arrays(**arrays):
"""Create an in memory DataFrame from numpy arrays.
Example
>>> import vaex, numpy as np
>>> x = np.arange(5)
>>> y = x ** 2
>>> vaex.from_arrays(x=x, y=y)
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
>>> some_dict = {'x': x, 'y': y}
>>> vaex.from_arrays(**some_dict) # in case you have your columns in a dict
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
:param arrays: keyword arguments with arrays
:rtype: DataFrame
"""
import numpy as np
import six
from .column import Column
df = vaex.dataframe.DataFrameArrays("array")
for name, array in arrays.items():
if isinstance(array, Column):
df.add_column(name, array)
else:
array = np.asanyarray(array)
df.add_column(name, array)
return df | Create an in memory DataFrame from numpy arrays.
Example
>>> import vaex, numpy as np
>>> x = np.arange(5)
>>> y = x ** 2
>>> vaex.from_arrays(x=x, y=y)
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
>>> some_dict = {'x': x, 'y': y}
>>> vaex.from_arrays(**some_dict) # in case you have your columns in a dict
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
:param arrays: keyword arguments with arrays
:rtype: DataFrame | Below is the instruction that describes the task:
### Input:
Create an in memory DataFrame from numpy arrays.
Example
>>> import vaex, numpy as np
>>> x = np.arange(5)
>>> y = x ** 2
>>> vaex.from_arrays(x=x, y=y)
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
>>> some_dict = {'x': x, 'y': y}
>>> vaex.from_arrays(**some_dict) # in case you have your columns in a dict
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
:param arrays: keyword arguments with arrays
:rtype: DataFrame
### Response:
def from_arrays(**arrays):
"""Create an in memory DataFrame from numpy arrays.
Example
>>> import vaex, numpy as np
>>> x = np.arange(5)
>>> y = x ** 2
>>> vaex.from_arrays(x=x, y=y)
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
>>> some_dict = {'x': x, 'y': y}
>>> vaex.from_arrays(**some_dict) # in case you have your columns in a dict
# x y
0 0 0
1 1 1
2 2 4
3 3 9
4 4 16
:param arrays: keyword arguments with arrays
:rtype: DataFrame
"""
import numpy as np
import six
from .column import Column
df = vaex.dataframe.DataFrameArrays("array")
for name, array in arrays.items():
if isinstance(array, Column):
df.add_column(name, array)
else:
array = np.asanyarray(array)
df.add_column(name, array)
return df |
def resolve_colors(self, colorstack):
'Returns the curses attribute for the colorstack, a list of color option names sorted highest-precedence color first.'
attr = CursesAttr()
for coloropt in colorstack:
c = self.get_color(coloropt)
attr = attr.update_attr(c)
return attr | Returns the curses attribute for the colorstack, a list of color option names sorted highest-precedence color first. | Below is the instruction that describes the task:
### Input:
Returns the curses attribute for the colorstack, a list of color option names sorted highest-precedence color first.
### Response:
def resolve_colors(self, colorstack):
'Returns the curses attribute for the colorstack, a list of color option names sorted highest-precedence color first.'
attr = CursesAttr()
for coloropt in colorstack:
c = self.get_color(coloropt)
attr = attr.update_attr(c)
return attr |
def iter_org_events(self, org, number=-1, etag=None):
"""Iterate over events as they appear on the user's organization
dashboard. You must be authenticated to view this.
:param str org: (required), name of the organization
:param int number: (optional), number of events to return. Default: -1
returns all available events
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: list of :class:`Event <github3.events.Event>`\ s
"""
url = ''
if org:
url = self._build_url('events', 'orgs', org, base_url=self._api)
return self._iter(int(number), url, Event, etag=etag) | Iterate over events as they appear on the user's organization
dashboard. You must be authenticated to view this.
:param str org: (required), name of the organization
:param int number: (optional), number of events to return. Default: -1
returns all available events
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: list of :class:`Event <github3.events.Event>`\ s | Below is the instruction that describes the task:
### Input:
Iterate over events as they appear on the user's organization
dashboard. You must be authenticated to view this.
:param str org: (required), name of the organization
:param int number: (optional), number of events to return. Default: -1
returns all available events
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: list of :class:`Event <github3.events.Event>`\ s
### Response:
def iter_org_events(self, org, number=-1, etag=None):
"""Iterate over events as they appear on the user's organization
dashboard. You must be authenticated to view this.
:param str org: (required), name of the organization
:param int number: (optional), number of events to return. Default: -1
returns all available events
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: list of :class:`Event <github3.events.Event>`\ s
"""
url = ''
if org:
url = self._build_url('events', 'orgs', org, base_url=self._api)
return self._iter(int(number), url, Event, etag=etag) |
def records(self):
"""
Parse the DIAMOND output and yield records. This will be used to read
original DIAMOND output (either from stdin or from a file) to turn the
DIAMOND results into Python dictionaries that will then be stored in
our JSON format.
@return: A generator that produces C{dict}s containing 'alignments' and
'query' C{str} keys.
"""
with as_handle(self._filename) as fp:
previousQtitle = None
            subjectsSeen = set()  # must be a set: .add() is called on it below
record = {}
for line in fp:
line = line[:-1]
try:
(qtitle, stitle, bitscore, evalue, qframe, qseq,
qstart, qend, sseq, sstart, send, slen, btop, nident,
positive) = line.split('\t')
except ValueError as e:
# We may not be able to find 'nident' and 'positives'
# because they were added in version 2.0.3 and will not
# be present in any of our JSON output generated before
# that. So those values will be None when reading
# DIAMOND output without those fields, but that's much
# better than no longer being able to read that data.
if six.PY2:
error = 'need more than 13 values to unpack'
else:
error = (
'not enough values to unpack (expected 15, '
'got 13)')
if str(e) == error:
(qtitle, stitle, bitscore, evalue, qframe,
qseq, qstart, qend, sseq, sstart, send, slen,
btop) = line.split('\t')
nident = positive = None
else:
raise
hsp = {
'bits': float(bitscore),
'btop': btop,
'expect': float(evalue),
'frame': int(qframe),
'identicalCount': None if nident is None else int(nident),
'positiveCount': (
None if positive is None else int(positive)),
'query': qseq,
'query_start': int(qstart),
'query_end': int(qend),
'sbjct': sseq,
'sbjct_start': int(sstart),
'sbjct_end': int(send),
}
if previousQtitle == qtitle:
# We have already started accumulating alignments for this
# query.
if stitle not in subjectsSeen:
# We have not seen this subject before, so this is a
# new alignment.
subjectsSeen.add(stitle)
alignment = {
'hsps': [hsp],
'length': int(slen),
'title': stitle,
}
record['alignments'].append(alignment)
else:
# We have already seen this subject, so this is another
# HSP in an already existing alignment.
for alignment in record['alignments']:
if alignment['title'] == stitle:
alignment['hsps'].append(hsp)
break
else:
# All alignments for the previous query id (if any)
# have been seen.
if previousQtitle is not None:
yield record
# Start building up the new record.
record = {}
subjectsSeen = {stitle}
alignment = {
'hsps': [hsp],
'length': int(slen),
'title': stitle,
}
record['alignments'] = [alignment]
record['query'] = qtitle
previousQtitle = qtitle
# Yield the last record, if any.
if record:
yield record | Parse the DIAMOND output and yield records. This will be used to read
original DIAMOND output (either from stdin or from a file) to turn the
DIAMOND results into Python dictionaries that will then be stored in
our JSON format.
@return: A generator that produces C{dict}s containing 'alignments' and
'query' C{str} keys. | Below is the instruction that describes the task:
### Input:
Parse the DIAMOND output and yield records. This will be used to read
original DIAMOND output (either from stdin or from a file) to turn the
DIAMOND results into Python dictionaries that will then be stored in
our JSON format.
@return: A generator that produces C{dict}s containing 'alignments' and
'query' C{str} keys.
### Response:
def records(self):
"""
Parse the DIAMOND output and yield records. This will be used to read
original DIAMOND output (either from stdin or from a file) to turn the
DIAMOND results into Python dictionaries that will then be stored in
our JSON format.
@return: A generator that produces C{dict}s containing 'alignments' and
'query' C{str} keys.
"""
with as_handle(self._filename) as fp:
previousQtitle = None
            subjectsSeen = set()  # must be a set: .add() is called on it below
record = {}
for line in fp:
line = line[:-1]
try:
(qtitle, stitle, bitscore, evalue, qframe, qseq,
qstart, qend, sseq, sstart, send, slen, btop, nident,
positive) = line.split('\t')
except ValueError as e:
# We may not be able to find 'nident' and 'positives'
# because they were added in version 2.0.3 and will not
# be present in any of our JSON output generated before
# that. So those values will be None when reading
# DIAMOND output without those fields, but that's much
# better than no longer being able to read that data.
if six.PY2:
error = 'need more than 13 values to unpack'
else:
error = (
'not enough values to unpack (expected 15, '
'got 13)')
if str(e) == error:
(qtitle, stitle, bitscore, evalue, qframe,
qseq, qstart, qend, sseq, sstart, send, slen,
btop) = line.split('\t')
nident = positive = None
else:
raise
hsp = {
'bits': float(bitscore),
'btop': btop,
'expect': float(evalue),
'frame': int(qframe),
'identicalCount': None if nident is None else int(nident),
'positiveCount': (
None if positive is None else int(positive)),
'query': qseq,
'query_start': int(qstart),
'query_end': int(qend),
'sbjct': sseq,
'sbjct_start': int(sstart),
'sbjct_end': int(send),
}
if previousQtitle == qtitle:
# We have already started accumulating alignments for this
# query.
if stitle not in subjectsSeen:
# We have not seen this subject before, so this is a
# new alignment.
subjectsSeen.add(stitle)
alignment = {
'hsps': [hsp],
'length': int(slen),
'title': stitle,
}
record['alignments'].append(alignment)
else:
# We have already seen this subject, so this is another
# HSP in an already existing alignment.
for alignment in record['alignments']:
if alignment['title'] == stitle:
alignment['hsps'].append(hsp)
break
else:
# All alignments for the previous query id (if any)
# have been seen.
if previousQtitle is not None:
yield record
# Start building up the new record.
record = {}
subjectsSeen = {stitle}
alignment = {
'hsps': [hsp],
'length': int(slen),
'title': stitle,
}
record['alignments'] = [alignment]
record['query'] = qtitle
previousQtitle = qtitle
# Yield the last record, if any.
if record:
yield record |
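Stripped of the DIAMOND details, the grouping logic above folds sorted (query, subject) rows into per-query records, using a set of already-seen subjects to decide between opening a new alignment and appending another HSP. A sketch with hypothetical rows:

rows = [('q1', 's1'), ('q1', 's1'), ('q1', 's2'), ('q2', 's1')]
records, seen, prev = [], set(), None
for qtitle, stitle in rows:
    if qtitle != prev:
        # New query: start a fresh record and seen-set.
        records.append({'query': qtitle,
                        'alignments': [{'title': stitle, 'hsps': 1}]})
        seen, prev = {stitle}, qtitle
    elif stitle not in seen:
        # Known query, new subject: open a new alignment.
        seen.add(stitle)
        records[-1]['alignments'].append({'title': stitle, 'hsps': 1})
    else:
        # Known query and subject: another HSP on the existing alignment.
        for a in records[-1]['alignments']:
            if a['title'] == stitle:
                a['hsps'] += 1
print(records)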
def plot_compare(self, other_plotter):
"""
    plot two band structures for comparison. One is in red, the other in blue.
The two band structures need to be defined on the same symmetry lines!
and the distance between symmetry lines is
the one of the band structure used to build the PhononBSPlotter
Args:
another PhononBSPlotter object defined along the same symmetry lines
Returns:
a matplotlib object with both band structures
"""
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
if len(data_orig['distances']) != len(data['distances']):
raise ValueError('The two objects are not compatible.')
plt = self.get_plot()
band_linewidth = 1
for i in range(other_plotter._nb_bands):
for d in range(len(data_orig['distances'])):
plt.plot(data_orig['distances'][d],
[e[i] for e in data['frequency']][d],
'r-', linewidth=band_linewidth)
return plt | plot two band structures for comparison. One is in red, the other in blue.
The two band structures need to be defined on the same symmetry lines!
and the distance between symmetry lines is
the one of the band structure used to build the PhononBSPlotter
Args:
another PhononBSPlotter object defined along the same symmetry lines
Returns:
a matplotlib object with both band structures | Below is the instruction that describes the task:
### Input:
plot two band structures for comparison. One is in red, the other in blue.
The two band structures need to be defined on the same symmetry lines!
and the distance between symmetry lines is
the one of the band structure used to build the PhononBSPlotter
Args:
another PhononBSPlotter object defined along the same symmetry lines
Returns:
a matplotlib object with both band structures
### Response:
def plot_compare(self, other_plotter):
"""
    plot two band structures for comparison. One is in red, the other in blue.
The two band structures need to be defined on the same symmetry lines!
and the distance between symmetry lines is
the one of the band structure used to build the PhononBSPlotter
Args:
another PhononBSPlotter object defined along the same symmetry lines
Returns:
a matplotlib object with both band structures
"""
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
if len(data_orig['distances']) != len(data['distances']):
raise ValueError('The two objects are not compatible.')
plt = self.get_plot()
band_linewidth = 1
for i in range(other_plotter._nb_bands):
for d in range(len(data_orig['distances'])):
plt.plot(data_orig['distances'][d],
[e[i] for e in data['frequency']][d],
'r-', linewidth=band_linewidth)
return plt |
def unifrac(self, weighted=True, rank="auto"):
"""A beta diversity metric that takes into account the relative relatedness of community
members. Weighted UniFrac looks at abundances, unweighted UniFrac looks at presence.
Parameters
----------
weighted : `bool`
Calculate the weighted (True) or unweighted (False) distance metric.
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
Returns
-------
skbio.stats.distance.DistanceMatrix, a distance matrix.
"""
# needs read counts, not relative abundances
if self._guess_normalized():
raise OneCodexException("UniFrac requires unnormalized read counts.")
df = self.to_df(rank=rank, normalize=False)
counts = []
for c_id in df.index:
counts.append(df.loc[c_id].tolist())
tax_ids = df.keys().tolist()
tree = self.tree_build()
tree = self.tree_prune_rank(tree, rank=df.ocx_rank)
# there's a bug (?) in skbio where it expects the root to only have
# one child, so we do a little faking here
from skbio.tree import TreeNode
new_tree = TreeNode(name="fake root")
new_tree.rank = "no rank"
new_tree.append(tree)
# then finally run the calculation and return
if weighted:
return skbio.diversity.beta_diversity(
"weighted_unifrac", counts, df.index.tolist(), tree=new_tree, otu_ids=tax_ids
)
else:
return skbio.diversity.beta_diversity(
"unweighted_unifrac", counts, df.index.tolist(), tree=new_tree, otu_ids=tax_ids
) | A beta diversity metric that takes into account the relative relatedness of community
members. Weighted UniFrac looks at abundances, unweighted UniFrac looks at presence.
Parameters
----------
weighted : `bool`
Calculate the weighted (True) or unweighted (False) distance metric.
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
Returns
-------
skbio.stats.distance.DistanceMatrix, a distance matrix. | Below is the instruction that describes the task:
### Input:
A beta diversity metric that takes into account the relative relatedness of community
members. Weighted UniFrac looks at abundances, unweighted UniFrac looks at presence.
Parameters
----------
weighted : `bool`
Calculate the weighted (True) or unweighted (False) distance metric.
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
Returns
-------
skbio.stats.distance.DistanceMatrix, a distance matrix.
### Response:
def unifrac(self, weighted=True, rank="auto"):
"""A beta diversity metric that takes into account the relative relatedness of community
members. Weighted UniFrac looks at abundances, unweighted UniFrac looks at presence.
Parameters
----------
weighted : `bool`
Calculate the weighted (True) or unweighted (False) distance metric.
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
Returns
-------
skbio.stats.distance.DistanceMatrix, a distance matrix.
"""
# needs read counts, not relative abundances
if self._guess_normalized():
raise OneCodexException("UniFrac requires unnormalized read counts.")
df = self.to_df(rank=rank, normalize=False)
counts = []
for c_id in df.index:
counts.append(df.loc[c_id].tolist())
tax_ids = df.keys().tolist()
tree = self.tree_build()
tree = self.tree_prune_rank(tree, rank=df.ocx_rank)
# there's a bug (?) in skbio where it expects the root to only have
# one child, so we do a little faking here
from skbio.tree import TreeNode
new_tree = TreeNode(name="fake root")
new_tree.rank = "no rank"
new_tree.append(tree)
# then finally run the calculation and return
if weighted:
return skbio.diversity.beta_diversity(
"weighted_unifrac", counts, df.index.tolist(), tree=new_tree, otu_ids=tax_ids
)
else:
return skbio.diversity.beta_diversity(
"unweighted_unifrac", counts, df.index.tolist(), tree=new_tree, otu_ids=tax_ids
) |