def flatten(iterable):
"""convenience tool to flatten any nested iterable
example:
>>> flatten([[[],[4]],[[[5,[6,7, []]]]]])
[4, 5, 6, 7]
>>> flatten('hello')
'hello'
Parameters
----------
iterable
Returns
-------
flattened object
"""
if isiterable(iterable):
flat = []
for item in list(iterable):
item = flatten(item)
if not isiterable(item):
item = [item]
flat += item
return flat
else:
return iterable
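The snippet above calls isiterable, which is not shown. A minimal sketch of what such a helper might look like, assuming strings are deliberately treated as non-iterable so that flatten('hello') returns the string unchanged:

def isiterable(obj):
    """Return True if obj is iterable but not a string (hypothetical helper)."""
    if isinstance(obj, str):
        return False
    try:
        iter(obj)
        return True
    except TypeError:
        return False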
def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False):
"""
Remove an edge from the StructureGraph. If no image is given, this method will fail.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edges = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if to_jimage is None:
raise ValueError("Image must be supplied, to avoid ambiguity.")
if existing_edges:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(from_index, to_index, edge_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
for i, properties in existing_reverse.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(to_index, from_index, edge_index)
else:
raise ValueError("Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
))
def container_remove_objects(object_id, input_params={}, always_retry=False, **kwargs):
"""
Invokes the /container-xxxx/removeObjects API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FremoveObjects
"""
return DXHTTPRequest('/%s/removeObjects' % object_id, input_params, always_retry=always_retry, **kwargs)
def _strip_zoom(input_string, strip_string):
"""Return zoom level as integer or throw error."""
try:
return int(input_string.strip(strip_string))
except Exception as e:
raise MapcheteConfigError("zoom level could not be determined: %s" % e)
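A quick usage sketch, assuming the function above is in scope; note that str.strip removes any of the given characters from both ends rather than a literal prefix:

print(_strip_zoom("zoom=5", "zoom="))  # -> 5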
def solar_position(moment, latitude, longitude, Z=0.0, T=298.15, P=101325.0,
atmos_refract=0.5667):
r'''Calculate the position of the sun in the sky. It is defined in terms of
two angles - the zenith and the azimuth. The azimuth tells where a sundial
would see the sun as coming from; the zenith tells how high in the sky it
is. The solar elevation angle is returned for convenience; it is the
complementary angle of the zenith.
The sun's refraction changes how high the sun appears to be;
so values are returned with an optional conversion to the apparent angle.
This impacts only the zenith/elevation.
Uses the Reda and Andreas (2004) model described in [1]_,
originally incorporated into the excellent
`pvlib library <https://github.com/pvlib/pvlib-python>`_
Parameters
----------
moment : datetime
Time and date for the calculation, in local UTC time (not daylight
savings time), [-]
latitude : float
Latitude, between -90 and 90 [degrees]
longitude : float
Longitude, between -180 and 180, [degrees]
Z : float, optional
Elevation above sea level for the solar position calculation, [m]
T : float, optional
Temperature of atmosphere at ground level, [K]
P : float, optional
Pressure of atmosphere at ground level, [Pa]
atmos_refract : float, optional
Atmospheric refractivity, [degrees]
Returns
-------
apparent_zenith : float
Zenith of the sun as observed from the ground based after accounting
for atmospheric refraction, [degrees]
zenith : float
Actual zenith of the sun (ignores atmospheric refraction), [degrees]
apparent_altitude : float
Altitude of the sun as observed from the ground based after accounting
for atmospheric refraction, [degrees]
altitude : float
Actual altitude of the sun (ignores atmospheric refraction), [degrees]
azimuth : float
The azimuth of the sun, [degrees]
equation_of_time : float
Equation of time - the number of seconds to be added to the day's
mean solar time to obtain the apparent solar noon time, [seconds]
Examples
--------
>>> solar_position(datetime(2003, 10, 17, 13, 30, 30), 45, 45)
[140.8367913391112, 140.8367913391112, -50.83679133911118, -50.83679133911118, 329.9096671679604, 878.4902950980904]
Sunrise occurs when the zenith is 90 degrees (Calgary, AB):
>>> solar_position(datetime(2018, 4, 15, 6, 43, 5), 51.0486, -114.07)[0]
90.00054676987014
Sunrise also occurs when the zenith is 90 degrees (13.5 hours later):
>>> solar_position(datetime(2018, 4, 15, 20, 30, 28), 51.0486, -114.07)
[89.9995695661236, 90.54103812161853, 0.00043043387640950836, -0.5410381216185247, 286.8313781904518, 6.631429525878048]
Notes
-----
If you were standing at the same longitude of the sun such that it was no
further east or west than you were, the amount of angle it was south or
north of you is the *zenith*. If it were directly overhead it would be 0°;
a little north or south and it would be a little positive;
near sunset or sunrise, near 90°; and at night, between 90° and 180°.
The *solar altitude angle* is defined as 90° - `zenith`.
Note the *elevation* angle is just another name for the *altitude* angle.
The *azimuth* is the angle in degrees that the sun is east of North.
It is measured eastwards from North, from 0° to 360°. Other conventions may be used.
Note that due to differences in atmospheric refractivity, estimates of
sunset and sunrise are accurate to no more than one minute. Refraction
conditions truly vary across the atmosphere; so characterizing it by an
average value is limiting as well.
References
----------
.. [1] Reda, Ibrahim, and Afshin Andreas. "Solar Position Algorithm for
Solar Radiation Applications." Solar Energy 76, no. 5 (January 1, 2004):
577-89. https://doi.org/10.1016/j.solener.2003.12.003.
.. [2] "Navigation - What Azimuth Description Systems Are in Use? -
Astronomy Stack Exchange."
https://astronomy.stackexchange.com/questions/237/what-azimuth-description-systems-are-in-use?rq=1.
'''
from fluids.optional import spa
delta_t = spa.calculate_deltat(moment.year, moment.month)
unixtime = time.mktime(moment.timetuple())
# Input pressure in milibar; input temperature in deg C
result = spa.solar_position_numpy(unixtime, lat=latitude, lon=longitude, elev=Z,
pressure=P*1E-2, temp=T-273.15, delta_t=delta_t,
atmos_refract=atmos_refract, sst=False, esd=False)
# confirmed equation of time https://www.minasi.com/figeot.asp
# Convert minutes to seconds; sometimes negative, sometimes positive
result[-1] = result[-1]*60.0
return result
def xpose6(m):
"""
Transpose a 6x6 matrix
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xpose6_c.html
:param m: Matrix to be transposed
:type m: list[6][6]
:return: Transposed matrix
:rtype: list[6][6]
"""
m = stypes.toDoubleMatrix(m)
mout = stypes.emptyDoubleMatrix(x=6, y=6)
libspice.xpose6_c(m, mout)
return stypes.cMatrixToNumpy(mout)
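A short usage sketch, assuming SpiceyPy is installed and exposes this function as spiceypy.xpose6; the result should match NumPy's own transpose:

import numpy as np
import spiceypy as spice

m = np.arange(36.0).reshape(6, 6)
mt = spice.xpose6(m)
assert (mt == m.T).all()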
def cumulative_window(group_by=None, order_by=None):
"""Create a cumulative window for use with aggregate window functions.
All window frames / ranges are inclusive.
Parameters
----------
group_by : expressions, default None
Either specify here or with TableExpr.group_by
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
Returns
-------
Window
"""
return Window(
preceding=None, following=0, group_by=group_by, order_by=order_by
)
def download_file(url, filename=None, show_progress=draw_pbar):
'''
Download a file and show progress
url: the URL of the file to download
filename: the filename to download it to (if not given, uses the url's filename part)
show_progress: callback function to update a progress bar
the show_progress function shall take two parameters: `seen` and `size`, and
return nothing.
This function returns the filename it has written the result to.
'''
if filename is None:
filename = url.split('/')[-1]
r = requests.get(url, stream=True)
size = int(r.headers['Content-Length'].strip())
seen = 0
show_progress(0, size)
seen = 1024
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
seen += 1024
show_progress(seen, size)
if chunk:
f.write(chunk)
f.flush()
return filename
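The default show_progress callback, draw_pbar, is not shown in the snippet. A minimal sketch of a callback honouring the (seen, size) contract described above:

import sys

def draw_pbar(seen, size):
    """Hypothetical progress callback: print a percentage on a single line."""
    if size:
        pct = min(100, int(100 * seen / size))
        sys.stdout.write('\rdownloading: %3d%%' % pct)
        sys.stdout.flush()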
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(InitStorageValue, self).fix_config(options)
opt = "storage_name"
if opt not in options:
options[opt] = "unknown"
if opt not in self.help:
self.help[opt] = "The name of the storage value to delete (string)."
opt = "value"
if opt not in options:
options[opt] = "1"
if opt not in self.help:
self.help[opt] = "The initial value (string)."
return options
def closeEvent(self, event):
"""
things to be done when the GUI closes, like saving the settings
"""
self.script_thread.quit()
self.read_probes.quit()
if self.config_filename:
fname = self.config_filename
self.save_config(fname)
event.accept()
print('\n\n======================================================')
print('================= Closing B26 Python LAB =============')
print('======================================================\n\n')
def set_send_enable(self, setting):
"""
Set the send enable setting on the watch
"""
self._pebble.send_packet(DataLogging(data=DataLoggingSetSendEnable(enabled=setting)))
def make_headers(context: TraceContext) -> Headers:
"""Creates dict with zipkin headers from supplied trace context.
"""
headers = {
TRACE_ID_HEADER: context.trace_id,
SPAN_ID_HEADER: context.span_id,
FLAGS_HEADER: '0',
SAMPLED_ID_HEADER: '1' if context.sampled else '0',
}
if context.parent_id is not None:
headers[PARENT_ID_HEADER] = context.parent_id
return headers
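The header-name constants used above are not shown in the snippet. Under Zipkin's B3 propagation convention they would typically be defined along these lines:

TRACE_ID_HEADER = 'X-B3-TraceId'
SPAN_ID_HEADER = 'X-B3-SpanId'
PARENT_ID_HEADER = 'X-B3-ParentSpanId'
FLAGS_HEADER = 'X-B3-Flags'
SAMPLED_ID_HEADER = 'X-B3-Sampled'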
async def get_scene(self, scene_id, from_cache=True) -> Scene:
"""Get a scene resource instance.
:raises a ResourceNotFoundException when no scene found.
:raises a PvApiError when something is wrong with the hub.
"""
if not from_cache:
await self.get_scenes()
for _scene in self.scenes:
if _scene.id == scene_id:
return _scene
raise ResourceNotFoundException("Scene not found scene_id: {}".format(scene_id))
def set_server(self, server_pos, key, value):
"""Set the key to the value for the server_pos (position in the list)."""
if zeroconf_tag and self.zeroconf_enable_tag:
self.listener.set_server(server_pos, key, value)
def cmprss(delim, n, instr, lenout=_default_len_out):
"""
Compress a character string by removing occurrences of
more than N consecutive occurrences of a specified
character.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cmprss_c.html
:param delim: Delimiter to be compressed.
:type delim: str
:param n: Maximum consecutive occurrences of delim.
:type n: int
:param instr: Input string.
:type instr: str
:param lenout: Optional available space in output string.
:type lenout: Optional int
:return: Compressed string.
:rtype: str
"""
delim = ctypes.c_char(delim.encode(encoding='UTF-8'))
n = ctypes.c_int(n)
instr = stypes.stringToCharP(instr)
output = stypes.stringToCharP(lenout)
libspice.cmprss_c(delim, n, instr, lenout, output)
return stypes.toPythonString(output)
def reset ():
""" Clear the module state. This is mainly for testing purposes.
Note that this must be called _after_ resetting the module 'feature'.
"""
global __prefixes_suffixes, __suffixes_to_types, __types, __rule_names_to_types, __target_suffixes_cache
__register_features ()
# Stores suffixes for generated targets.
__prefixes_suffixes = [property.PropertyMap(), property.PropertyMap()]
# Maps suffixes to types
__suffixes_to_types = {}
# A map with all the registered types, indexed by the type name
# Each entry is a dictionary with following values:
# 'base': the name of base type or None if type has no base
# 'derived': a list of names of type which derive from this one
# 'scanner': the scanner class registered for this type, if any
__types = {}
# Caches suffixes for targets with certain properties.
__target_suffixes_cache = {}
def get_subdomain_history_neighbors(self, cursor, subdomain_rec):
"""
Given a subdomain record, get its neighbors.
I.e. get all of the subdomain records with the previous sequence number,
and get all of the subdomain records with the next sequence number
Returns {'prev': [...blockchain order...], 'cur': [...blockchain order...], 'fut': [...blockchain order...]}
"""
# what's the subdomain's immediate prior history?
hist = self.subdomain_db.get_subdomain_history(subdomain_rec.get_fqn(), include_unaccepted=True, start_sequence=subdomain_rec.n-1, end_sequence=subdomain_rec.n, cur=cursor)
hist.sort(lambda h1, h2: -1 if h1.n < h2.n or (h1.n == h2.n and h1.parent_zonefile_index < h2.parent_zonefile_index) \
else 0 if h1.n == h2.n and h1.parent_zonefile_index == h2.parent_zonefile_index \
else 1)
# what's the subdomain's current and immediate future?
fut = self.subdomain_db.get_subdomain_history(subdomain_rec.get_fqn(), include_unaccepted=True, start_sequence=subdomain_rec.n, end_sequence=subdomain_rec.n+2, cur=cursor)
fut.sort(lambda h1, h2: -1 if h1.n < h2.n or (h1.n == h2.n and h1.parent_zonefile_index < h2.parent_zonefile_index) \
else 0 if h1.n == h2.n and h1.parent_zonefile_index == h2.parent_zonefile_index \
else 1)
# extract the current (conflicting) records from the future
cur = []
tmp_fut = []
for f in fut:
if f.n == subdomain_rec.n:
cur.append(f)
else:
tmp_fut.append(f)
fut = tmp_fut
ret = {'prev': hist, 'cur': cur, 'fut': fut}
return ret
def getTransitionProbabilities(state, action):
"""
Parameters
----------
state : tuple
The state
action : int
The action
Returns
-------
s1, p, r : tuple of two lists and an int
s1 are the next states, p are the probabilities, and r is the reward
"""
#assert isValid(state)
assert 0 <= action < ACTIONS
if not isLegal(state, action):
# If the action is illegal, then transition back to the same state but
# incur a high negative reward
s1 = [convertTupleToIndex(state)]
return(s1, [1], -10)
# Update the state with the action
state = list(state)
state[action] = PLAYER
if isWon(state, PLAYER):
# If the player's action is a winning move then transition to the
# winning state and receive a reward of 1.
s1 = [convertTupleToIndex(state)]
return(s1, [1], 1)
elif isDraw(state):
s1 = [convertTupleToIndex(state)]
return(s1, [1], 0)
# Now we search through the opponents moves, and calculate transition
# probabilities based on maximising the opponents chance of winning..
s1 = []
p = []
legal_a = getLegalActions(state)
for a in legal_a:
state[a] = OPPONENT
# If the opponent is going to win, we assume that the winning move will
# be chosen:
if isWon(state, OPPONENT):
s1 = [convertTupleToIndex(state)]
return(s1, [1], -1)
elif isDraw(state):
s1 = [convertTupleToIndex(state)]
return(s1, [1], 0)
# Otherwise we assume the opponent will select a move with uniform
# probability across potential moves:
s1.append(convertTupleToIndex(state))
p.append(1.0 / len(legal_a))
state[a] = 0
# During non-terminal play states the reward is 0.
return(s1, p, 0)
def nuc_v(msg):
"""Calculate NUCv, Navigation Uncertainty Category - Velocity (ADS-B version 1)
Args:
msg (string): 28 bytes hexadecimal message string,
Returns:
int or string: 95% Horizontal Velocity Error
int or string: 95% Vertical Velocity Error
"""
tc = typecode(msg)
if tc != 19:
raise RuntimeError("%s: Not an airborne velocity message, expecting TC = 19" % msg)
msgbin = common.hex2bin(msg)
NUCv = common.bin2int(msgbin[42:45])
try:
HVE = uncertainty.NUCv[NUCv]['HVE']
VVE = uncertainty.NUCv[NUCv]['VVE']
except KeyError:
HVE, VVE = uncertainty.NA, uncertainty.NA
return HVE, VVE
def clean_caches(path):
"""
Removes all python cache files recursively on a path.
:param path: the path
:return: None
"""
for dirname, subdirlist, filelist in os.walk(path):
for f in filelist:
if f.endswith('pyc'):
try:
os.remove(os.path.join(dirname, f))
except FileNotFoundError:
pass
if dirname.endswith('__pycache__'):
shutil.rmtree(dirname)
def sync(to_install, to_uninstall, verbose=False, dry_run=False, install_flags=None):
"""
Install and uninstall the given sets of modules.
"""
if not to_uninstall and not to_install:
click.echo("Everything up-to-date")
pip_flags = []
if not verbose:
pip_flags += ['-q']
if to_uninstall:
if dry_run:
click.echo("Would uninstall:")
for pkg in to_uninstall:
click.echo(" {}".format(pkg))
else:
check_call([sys.executable, '-m', 'pip', 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))
if to_install:
if install_flags is None:
install_flags = []
if dry_run:
click.echo("Would install:")
for ireq in to_install:
click.echo(" {}".format(format_requirement(ireq)))
else:
# prepare requirement lines
req_lines = []
for ireq in sorted(to_install, key=key_from_ireq):
ireq_hashes = get_hashes_from_ireq(ireq)
req_lines.append(format_requirement(ireq, hashes=ireq_hashes))
# save requirement lines to a temporary file
tmp_req_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)
tmp_req_file.write('\n'.join(req_lines))
tmp_req_file.close()
try:
check_call(
[sys.executable, '-m', 'pip', 'install', '-r', tmp_req_file.name] + pip_flags + install_flags
)
finally:
os.unlink(tmp_req_file.name)
return 0
def user_data_dir(appname, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.config/<appname> # or in $XDG_CONFIG_HOME if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. We don't
use $XDG_DATA_HOME as that data dir is mostly used at the time of
installation, instead of the application adding data during runtime.
Also, in practice, Linux apps tend to store their data in
"~/.config/<appname>" instead of "~/.local/share/<appname>".
"""
if sys.platform.startswith("win"):
if appauthor is None:
raise AppDirsError("must specify 'appauthor' on Windows")
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.join(_get_win_folder(const), appauthor, appname)
elif sys.platform == 'darwin':
path = os.path.join(
os.path.expanduser('~/Library/Application Support/'),
appname)
else:
path = os.path.join(
os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")),
appname.lower())
if version:
path = os.path.join(path, version)
return path
def upgrade():
"""Upgrade database."""
op.create_table(
'oauthclient_remoteaccount',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('client_id', sa.String(length=255), nullable=False),
sa.Column(
'extra_data',
sqlalchemy_utils.JSONType(),
nullable=False),
sa.ForeignKeyConstraint(['user_id'], [u'accounts_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('user_id', 'client_id')
)
op.create_table(
'oauthclient_useridentity',
sa.Column('id', sa.String(length=255), nullable=False),
sa.Column('method', sa.String(length=255), nullable=False),
sa.Column('id_user', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['id_user'], [u'accounts_user.id'], ),
sa.PrimaryKeyConstraint('id', 'method')
)
op.create_index(
'useridentity_id_user_method', 'oauthclient_useridentity',
['id_user', 'method'], unique=True
)
op.create_table(
'oauthclient_remotetoken',
sa.Column('id_remote_account', sa.Integer(), nullable=False),
sa.Column('token_type', sa.String(length=40), nullable=False),
sa.Column(
'access_token',
sqlalchemy_utils.EncryptedType(),
nullable=False),
sa.Column('secret', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(
['id_remote_account'], [u'oauthclient_remoteaccount.id'],
name='fk_oauthclient_remote_token_remote_account'
),
sa.PrimaryKeyConstraint('id_remote_account', 'token_type')
)
def build(self, pre=None, shortest=False):
"""Build this rule definition
:param list pre: The prerequisites list
:param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.
"""
if pre is None:
pre = []
res = deque()
for value in self.values:
try:
res.append(utils.val(value, pre, shortest=shortest))
except errors.FlushGrams as e:
prev = "".join(res)
res.clear()
# this is assuming a scope was pushed!
if len(self.fuzzer._scope_stack) == 1:
pre.append(prev)
else:
stmts = self.fuzzer._curr_scope.setdefault("prev_append", deque())
stmts.extend(pre)
stmts.append(prev)
pre.clear()
continue
except errors.OptGram as e:
continue
except errors.GramFuzzError as e:
print("{} : {}".format(self.name, str(e)))
raise
return self.sep.join(res)
def connect(self, name, func, sender=None, dispatch_uid=None):
"""
Connects a function to a hook.\
Creates the hook (name) if it does not exist
:param str name: The hook name
:param callable func: A function reference used as a callback
:param class sender: Optional sender __class__ to which the\
func should respond. Default will match all
:param str dispatch_uid: Optional unique id,\
see :py:class:`django.dispatch.Signal` for more info
"""
try:
signal = self._registry[name]
except KeyError:
signal = self.register(name)
signal.connect(func, sender=sender, dispatch_uid=dispatch_uid)
def pretty_size(value):
"""Convert a number of bytes into a human-readable string.
Output is 2...5 characters. Values >= 1000 always produce output in form: x.xxxU, xx.xxU, xxxU, xxxxU.
"""
exp = int(math.log(value, 1024)) if value > 0 else 0
unit = 'bkMGTPEZY'[exp]
if exp == 0:
return '%d%s' % (value, unit) # value < 1024, result is always without fractions
unit_value = value / (1024.0 ** exp) # value in the relevant units
places = int(math.log(unit_value, 10)) # number of digits before decimal point
return '%.*f%s' % (2 - places, unit_value, unit)
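A usage sketch, assuming pretty_size is in scope; the '%.*f' format trades decimal places for magnitude so the output stays short:

for n in (0, 999, 1536, 10 * 1024 ** 2, 3 * 1024 ** 3):
    print(n, '->', pretty_size(n))
# 0 -> 0b, 999 -> 999b, 1536 -> 1.50k, 10 MiB -> 10.0M, 3 GiB -> 3.00G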
def _cmptimestamps(self, filest1, filest2):
""" Compare time stamps of two files and return True
if file1 (source) is more recent than file2 (target) """
mtime_cmp = int((filest1.st_mtime - filest2.st_mtime) * 1000) > 0
if self._use_ctime:
return mtime_cmp or \
int((filest1.st_ctime - filest2.st_mtime) * 1000) > 0
else:
return mtime_cmp
def delete(self, id):
"""DELETE /mapfiles/id: Delete an existing mapfile owned by the current
user. Deletes the map entry in the db and removes the mapfile from the filesystem. """
map = self._delete_map_from_user_by_id(c.user, id)
if map is None:
abort(404)
if os.path.exists(os.path.join(config['mapfiles_dir'], map.filepath)):
os.unlink(os.path.join(config['mapfiles_dir'], map.filepath))
response.status = 204
# remove content-type from response headers so that webtest won't get confused
# http://groups.google.com/group/pylons-discuss/browse_thread/thread/1267650386ae521b
del response.headers['content-type']
return
def _Open(self, path_spec, mode='rb'):
"""Opens the file system defined by path specification.
Args:
path_spec (PathSpec): a path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
range_offset = getattr(path_spec, 'range_offset', None)
if range_offset is None:
raise errors.PathSpecError(
'Unsupported path specification without range offset.')
range_size = getattr(path_spec, 'range_size', None)
if range_size is None:
raise errors.PathSpecError(
'Unsupported path specification without range size.')
self._range_offset = range_offset
self._range_size = range_size
def file_delete(context, id, file_id):
"""file_delete(context, id, path)
Delete a component file
>>> dcictl component-file-delete [OPTIONS]
:param string id: ID of the component to delete file [required]
:param string file_id: ID for the file to delete [required]
"""
component.file_delete(context, id=id, file_id=file_id)
def build_path(graph, node1, node2, path=None):
"""
Build the path from node1 to node2.
The path is composed of all the nodes between node1 and node2,
node1 excluded. Although if there is a loop starting from node1, it will be
included in the path.
"""
if path is None:
path = []
if node1 is node2:
return path
path.append(node2)
for pred in graph.all_preds(node2):
if pred in path:
continue
build_path(graph, node1, pred, path)
return path
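An illustration with a hypothetical minimal graph object; only the all_preds method that build_path relies on is stubbed out, and nodes are compared by identity as in the function above:

class _Graph(object):
    def __init__(self, preds):
        self._preds = preds
    def all_preds(self, node):
        return self._preds.get(node, [])

# Edges: 1 -> 2 -> 3, so 2 is the predecessor of 3 and 1 of 2.
g = _Graph({3: [2], 2: [1]})
print(build_path(g, 1, 3))  # [3, 2]: the nodes between 1 and 3, with 1 excluded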
def attention_lm_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 1024
hparams.batch_size = 8192
hparams.max_length = 256
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 2000
hparams.initializer_gain = 1.0
hparams.num_hidden_layers = 6
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.label_smoothing = 0.0
hparams.shared_embedding_and_softmax_weights = False
hparams.add_hparam("filter_size", 4096) # Add new ones like this.
# attention-related flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("relu_dropout", 0.0)
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("encoder_full_attention", False)
return hparams
def copy(
self,
name,
start_codons=None,
stop_codons=None,
codon_table=None,
codon_table_changes=None):
"""
Make copy of this GeneticCode object with optional replacement
values for all fields.
"""
new_start_codons = (
self.start_codons.copy()
if start_codons is None
else start_codons)
new_stop_codons = (
self.stop_codons.copy()
if stop_codons is None
else stop_codons)
new_codon_table = (
self.codon_table.copy()
if codon_table is None
else codon_table)
if codon_table_changes is not None:
new_codon_table.update(codon_table_changes)
return GeneticCode(
name=name,
start_codons=new_start_codons,
stop_codons=new_stop_codons,
codon_table=new_codon_table)
def usage():
"""
Illustrate what the various input flags are and the options should be.
:return: none
"""
global g_script_name # name of the script being run.
print("")
print("Usage: " + g_script_name + " [...options...]")
print("")
print(" --help print out this help menu and show all the valid flags and inputs.")
print("")
print(" --inputfileadd filename where the new java messages to ignore are stored in.")
print("")
print(" --inputfilerm filename where the java messages are removed from the ignored list.")
print("")
print(" --loadjavamessage filename pickle file that stores the dict structure containing java messages to include.")
print("")
print(" --savejavamessage filename pickle file that saves the final dict structure after update.")
print("")
print(" --printjavamessage filename print java ignored java messages stored in pickle file filenam onto console and save into a text file.")
print("")
sys.exit(1)
def _extract_symbols(self, symbols, default=None):
"""! @brief Fill 'symbols' field with required flash algo symbols"""
to_ret = {}
for symbol in symbols:
symbolInfo = self.elf.symbol_decoder.get_symbol_for_name(symbol)
if symbolInfo is None:
if default is not None:
to_ret[symbol] = default
continue
raise FlashAlgoException("Missing symbol %s" % symbol)
to_ret[symbol] = symbolInfo.address
return to_ret
def add_values_to_bundle_safe(connection, bundle, values):
"""
Adds values to the specified bundle. Checks whether each value is already contained in the bundle; if so, it is not added.
Args:
connection: An opened Connection instance.
bundle: Bundle instance to add values in.
values: Values, that should be added in bundle.
Raises:
YouTrackException: if something is wrong with queries.
"""
for value in values:
try:
connection.addValueToBundle(bundle, value)
except YouTrackException as e:
if e.response.status == 409:
print("Value with name [ %s ] already exists in bundle [ %s ]" %
(utf8encode(value.name), utf8encode(bundle.name)))
else:
raise e
def get(self, key):
"""Get a document by id."""
doc = self._collection.find_one({'_id': key})
if doc:
doc.pop('_id')
return doc
def infos(self):
''' dict: The summation of all data available about the extracted article
Note:
Read only '''
data = {
"meta": {
"description": self.meta_description,
"lang": self.meta_lang,
"keywords": self.meta_keywords,
"favicon": self.meta_favicon,
"canonical": self.canonical_link,
"encoding": self.meta_encoding
},
"image": None,
"domain": self.domain,
"title": self.title,
"cleaned_text": self.cleaned_text,
"opengraph": self.opengraph,
"tags": self.tags,
"tweets": self.tweets,
"movies": [],
"links": self.links,
"authors": self.authors,
"publish_date": self.publish_date
}
# image
if self.top_image is not None:
data['image'] = {
'url': self.top_image.src,
'width': self.top_image.width,
'height': self.top_image.height,
'type': 'image'
}
# movies
for movie in self.movies:
data['movies'].append({
'embed_type': movie.embed_type,
'provider': movie.provider,
'width': movie.width,
'height': movie.height,
'embed_code': movie.embed_code,
'src': movie.src,
})
return data
def direction_vector(self, angle):
'''
Returns a unit vector, pointing in the arc's movement direction at a given (absolute) angle (in degrees).
No check is made whether angle lies within the arc's span (the result is computed the same way for angles outside of the arc's span).
Returns a 2x1 numpy array.
>>> a = Arc((0, 0), 1, 0, 90, True)
>>> assert all(abs(a.direction_vector(0) - np.array([0.0, 1.0])) < tol)
>>> assert all(abs(a.direction_vector(45) - np.array([ -0.70710678, 0.70710678])) < 1e-6)
>>> assert all(abs(a.direction_vector(90) - np.array([-1.0, 0.0])) < tol)
>>> assert all(abs(a.direction_vector(135) - np.array([-0.70710678, -0.70710678])) < 1e-6)
>>> assert all(abs(a.direction_vector(-180) - np.array([0.0, -1.0])) < tol)
>>> assert all(abs(a.direction_vector(-90) - np.array([1.0, 0.0])) < tol)
>>> a = a.reversed()
>>> assert all(abs(a.direction_vector(0) - np.array([0.0, -1.0])) < tol)
>>> assert all(abs(a.direction_vector(45) - np.array([ 0.70710678, -0.70710678])) < 1e-6)
>>> assert all(abs(a.direction_vector(90) - np.array([1.0, 0.0])) < tol)
>>> assert all(abs(a.direction_vector(135) - np.array([0.70710678, 0.70710678])) < 1e-6)
>>> assert all(abs(a.direction_vector(-180) - np.array([0.0, 1.0])) < tol)
>>> assert all(abs(a.direction_vector(-90) - np.array([-1.0, 0.0])) < tol)
'''
a = angle + self.sign * 90
a = a * np.pi / 180.0
return np.array([np.cos(a), np.sin(a)])
def pop(self, pair, default=None):
"""
Removes the **pair** from the Kerning and returns the value as an ``int``.
If no pair is found, **default** is returned. **pair** is a
``tuple`` of two :ref:`type-string`\s. This must return either
**default** or a :ref:`type-int-float`.
>>> font.kerning.pop(("A", "V"))
-20
>>> font.kerning.pop(("A", "W"))
-10.5
"""
return super(BaseKerning, self).pop(pair, default)
def char_between(lower, upper, func_name):
'''return current char and step if char is between lower and upper, where
@func_name: the name under which the one-argument comparison predicate is
registered via register_function; the predicate tests one char and returns True or False'''
function = register_function(func_name,
lambda char: lower<=char<=upper)
return char_on_predicate(function)
def returnValueList(self, key_list, last=False):
'''Return a list of key values for the first entry in the current list.
If 'last=True', then the last entry is referenced.
Returns None if the list is empty. If a key is missing, then
that entry in the list is None.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "order": 2},
... {"name": "Larry", "age": 18, "order": 3},
... {"name": "Joe", "age": 20, "income": 15000, "order": 1},
... {"name": "Bill", "age": 19, "income": 29000, "order": 4},
... ]
>>> print PLOD(test).returnValueList(["name", "income"])
['Jim', 93000]
>>> print PLOD(test).sort("name").returnValueList(["name", "income"], last=True)
['Larry', None]
:param last:
If True, the last entry is used rather than the first.
:return:
A value, or None if the list is empty.
'''
result = []
row = self.returnOneEntry(last=last)
if not row:
return None
dict_row = internal.convert_to_dict(row)
for field in key_list:
result.append(dict_row.get(field, None))
return result
def apply_new_scoped_variable_type(self, path, new_variable_type_str):
"""Applies the new data type of the scoped variable defined by path
:param str path: The path identifying the edited variable
:param str new_variable_type_str: New data type as str
"""
data_port_id = self.list_store[path][self.ID_STORAGE_ID]
try:
if self.model.state.scoped_variables[data_port_id].data_type.__name__ != new_variable_type_str:
self.model.state.scoped_variables[data_port_id].change_data_type(new_variable_type_str)
except ValueError as e:
logger.error("Error while changing data type: {0}".format(e))
def normalize_sort(sort=None):
"""
CONVERT SORT PARAMETERS TO A NORMAL FORM SO EASIER TO USE
"""
if not sort:
return Null
output = FlatList()
for s in listwrap(sort):
if is_text(s) or mo_math.is_integer(s):
output.append({"value": s, "sort": 1})
elif not s.field and not s.value and s.sort==None:
#ASSUME {name: sort} FORM
for n, v in s.items():
output.append({"value": n, "sort": sort_direction[v]})
else:
output.append({"value": coalesce(s.field, s.value), "sort": coalesce(sort_direction[s.sort], 1)})
return wrap(output)
def zpk2tf(z, p, k):
r"""Return polynomial transfer function representation from zeros and poles
:param ndarray z: Zeros of the transfer function.
:param ndarray p: Poles of the transfer function.
:param float k: System gain.
:return:
b : ndarray Numerator polynomial.
a : ndarray Numerator and denominator polynomials.
:func:`zpk2tf` forms transfer function polynomials from the zeros, poles, and gains
of a system in factored form.
zpk2tf(z,p,k) finds a rational transfer function
.. math:: \frac{B(s)}{A(s)} = \frac{b_1 s^{n-1}+\dots b_{n-1}s+b_n}{a_1 s^{m-1}+\dots a_{m-1}s+a_m}
given a system in factored transfer function form
.. math:: H(s) = \frac{Z(s)}{P(s)} = k \frac{(s-z_1)(s-z_2)\dots(s-z_m)}{(s-p_1)(s-p_2)\dots(s-p_n)}
with p being the pole locations, and z the zero locations, with as many.
The gains for each numerator transfer function are in vector k.
The zeros and poles must be real or come in complex conjugate pairs.
The polynomial denominator coefficients are returned in row vector a and
the polynomial numerator coefficients are returned in matrix b, which has
as many rows as there are columns of z.
Inf values can be used as place holders in z if some columns have fewer zeros than others.
.. note:: wrapper of scipy function zpk2tf
"""
import scipy.signal
b, a = scipy.signal.zpk2tf(z, p, k)
return b, a
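A short usage sketch, assuming the wrapper above is in scope (the call is forwarded to scipy.signal.zpk2tf). For H(s) = 4 (s + 1) / ((s + 2)(s + 3)):

b, a = zpk2tf([-1], [-2, -3], 4)
# b -> array([4., 4.])     numerator 4s + 4
# a -> array([1., 5., 6.]) denominator s**2 + 5s + 6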
def start_trace(reset=True, filter_func=None, time_filter_func=None):
"""Begins a trace. Setting reset to True will reset all previously recorded
trace data. filter_func needs to point to a callable function that accepts
the parameters (call_stack, module_name, class_name, func_name, full_name).
Every call will be passed into this function and it is up to the function
to decide if it should be included or not. Returning False means the call
will be filtered out and not included in the call graph.
"""
global trace_filter
global time_filter
if reset:
reset_trace()
if filter_func:
trace_filter = filter_func
else:
trace_filter = GlobbingFilter(exclude=['pycallgraph.*'])
if time_filter_func:
time_filter = time_filter_func
else:
time_filter = GlobbingFilter()
sys.settrace(tracer)
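A sketch of a custom filter_func; the signature mirrors the parameters named in the docstring above, and this hypothetical filter keeps only calls originating from modules whose name starts with 'myproject':

def my_filter(call_stack, module_name, class_name, func_name, full_name):
    return module_name is not None and module_name.startswith('myproject')

start_trace(reset=True, filter_func=my_filter)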
def count_if(predicate, seq):
"""Count the number of elements of seq for which the predicate is true.
>>> count_if(callable, [42, None, max, min])
2
"""
f = lambda count, x: count + (not not predicate(x))
return reduce(f, seq, 0)
def _gather_all_deps(self, args, kwargs):
"""Count the number of unresolved futures on which a task depends.
Args:
- args (List[args]) : The list of args list to the fn
- kwargs (Dict{kwargs}) : The dict of all kwargs passed to the fn
Returns:
- count, [list of dependencies]
"""
# Check the positional args
depends = []
count = 0
for dep in args:
if isinstance(dep, Future):
if self.tasks[dep.tid]['status'] not in FINAL_STATES:
count += 1
depends.extend([dep])
# Check for explicit kwargs ex, fu_1=<fut>
for key in kwargs:
dep = kwargs[key]
if isinstance(dep, Future):
if self.tasks[dep.tid]['status'] not in FINAL_STATES:
count += 1
depends.extend([dep])
# Check for futures in inputs=[<fut>...]
for dep in kwargs.get('inputs', []):
if isinstance(dep, Future):
if self.tasks[dep.tid]['status'] not in FINAL_STATES:
count += 1
depends.extend([dep])
return count, depends
def register(self, what, obj):
"""
Registering a plugin
Params
------
what: Nature of the plugin (backend, instrumentation, repo)
obj: Instance of the plugin
"""
# print("Registering pattern", name, pattern)
name = obj.name
version = obj.version
enable = obj.enable
if enable == 'n':
return
key = Key(name, version)
self.plugins[what][key] = obj
def validate_xml_text(text):
"""validates XML text"""
bad_chars = __INVALID_XML_CHARS & set(text)
if bad_chars:
for offset,c in enumerate(text):
if c in bad_chars:
raise RuntimeError('invalid XML character: ' + repr(c) + ' at offset ' + str(offset))
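The module-level __INVALID_XML_CHARS set is not shown in the snippet. A plausible definition under the XML 1.0 rules, where control characters other than tab, newline and carriage return are disallowed, might be:

__INVALID_XML_CHARS = {chr(c) for c in range(0x20) if c not in (0x09, 0x0A, 0x0D)}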
def get_feature_by_path(self, locus, term, rank, accession, **kwargs):
"""
Retrieve an enumerated sequence feature
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_feature_by_path(locus, term, rank, accession, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str locus: locus name or URI (required)
:param str term: Sequence Ontology (SO) term name, accession, or URI (required)
:param int rank: feature rank, must be at least 1 (required)
:param int accession: accession, must be at least 1 (required)
:return: Feature
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_feature_by_path_with_http_info(locus, term, rank, accession, **kwargs)
else:
(data) = self.get_feature_by_path_with_http_info(locus, term, rank, accession, **kwargs)
return data
def _reaction_po_to_dict(tokens) -> Reaction:
"""Convert a reaction parse object to a DSL.
:type tokens: ParseResult
"""
return Reaction(
reactants=_reaction_part_po_to_dict(tokens[REACTANTS]),
products=_reaction_part_po_to_dict(tokens[PRODUCTS]),
) | Convert a reaction parse object to a DSL.
:type tokens: ParseResult |
def _radec(self,*args,**kwargs):
"""Calculate ra and dec"""
lbd= self._lbd(*args,**kwargs)
return coords.lb_to_radec(lbd[:,0],lbd[:,1],degree=True,epoch=None) | Calculate ra and dec |
def configureIAMCredentials(self, AWSAccessKeyID, AWSSecretAccessKey, AWSSessionToken=""):
"""
**Description**
Used to configure/update the custom IAM credentials for Websocket SigV4 connection to
AWS IoT. Should be called before connect.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.configureIAMCredentials(obtainedAccessKeyID, obtainedSecretAccessKey, obtainedSessionToken)
.. note::
Hard-coding credentials into custom script is NOT recommended. Please use AWS Cognito identity service
or other credential provider.
**Parameters**
*AWSAccessKeyID* - AWS Access Key Id from user-specific IAM credentials.
*AWSSecretAccessKey* - AWS Secret Access Key from user-specific IAM credentials.
*AWSSessionToken* - AWS Session Token for temporary authentication from STS.
**Returns**
None
"""
iam_credentials_provider = IAMCredentialsProvider()
iam_credentials_provider.set_access_key_id(AWSAccessKeyID)
iam_credentials_provider.set_secret_access_key(AWSSecretAccessKey)
iam_credentials_provider.set_session_token(AWSSessionToken)
self._mqtt_core.configure_iam_credentials(iam_credentials_provider) | **Description**
Used to configure/update the custom IAM credentials for Websocket SigV4 connection to
AWS IoT. Should be called before connect.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.configureIAMCredentials(obtainedAccessKeyID, obtainedSecretAccessKey, obtainedSessionToken)
.. note::
Hard-coding credentials into custom script is NOT recommended. Please use AWS Cognito identity service
or other credential provider.
**Parameters**
*AWSAccessKeyID* - AWS Access Key Id from user-specific IAM credentials.
*AWSSecretAccessKey* - AWS Secret Access Key from user-specific IAM credentials.
*AWSSessionToken* - AWS Session Token for temporary authentication from STS.
**Returns**
None |
def _invalid_implementation(self, t, missing, mistyped, mismatched):
"""
Make a TypeError explaining why ``t`` doesn't implement our interface.
"""
assert missing or mistyped or mismatched, "Implementation wasn't invalid."
message = "\nclass {C} failed to implement interface {I}:".format(
C=getname(t),
I=getname(self),
)
if missing:
message += dedent(
"""
The following methods of {I} were not implemented:
{missing_methods}"""
).format(
I=getname(self),
missing_methods=self._format_missing_methods(missing)
)
if mistyped:
message += dedent(
"""
The following methods of {I} were implemented with incorrect types:
{mismatched_types}"""
).format(
I=getname(self),
mismatched_types=self._format_mismatched_types(mistyped),
)
if mismatched:
message += dedent(
"""
The following methods of {I} were implemented with invalid signatures:
{mismatched_methods}"""
).format(
I=getname(self),
mismatched_methods=self._format_mismatched_methods(mismatched),
)
return InvalidImplementation(message) | Make a TypeError explaining why ``t`` doesn't implement our interface. |
def redistribute_threads(blockdimx, blockdimy, blockdimz,
dimx, dimy, dimz):
"""
Redistribute threads from the Z dimension towards the X dimension.
Also clamp number of threads to the problem dimension size,
if necessary
"""
# Shift threads from the z dimension
# into the y dimension
while blockdimz > dimz:
tmp = blockdimz // 2
if tmp < dimz:
break
blockdimy *= 2
blockdimz = tmp
# Shift threads from the y dimension
# into the x dimension
while blockdimy > dimy:
tmp = blockdimy // 2
if tmp < dimy:
break
blockdimx *= 2
blockdimy = tmp
# Clamp the block dimensions
# if necessary
if dimx < blockdimx:
blockdimx = dimx
if dimy < blockdimy:
blockdimy = dimy
if dimz < blockdimz:
blockdimz = dimz
return blockdimx, blockdimy, blockdimz | Redistribute threads from the Z dimension towards the X dimension.
Also clamp number of threads to the problem dimension size,
if necessary |
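A minimal usage sketch for redistribute_threads above, assuming the function is importable as defined; the launch shape and problem dimensions are made-up values chosen to show the z-to-y and y-to-x shifts plus the clamping.
# Hypothetical CUDA-style launch: an (8, 8, 8) thread block for a 32 x 32 x 1 problem.
bx, by, bz = redistribute_threads(8, 8, 8, dimx=32, dimy=32, dimz=1)
print(bx, by, bz)  # expected (16, 32, 1): z threads folded into y, then y into x, then clamped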
def set_dimmer_start_time(self, hour, minute):
"""Set start time for task (hh:mm) in iso8601.
NB: dimmer starts 30 mins before time in app
"""
# This is to calculate the difference between local time
# and the time in the gateway
d1 = self._gateway.get_gateway_info().current_time
d2 = dt.utcnow()
diff = d1 - d2
newtime = dt(100, 1, 1, hour, minute, 00) - diff
command = {
ATTR_SMART_TASK_TRIGGER_TIME_INTERVAL:
[{
ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR: newtime.hour,
ATTR_SMART_TASK_TRIGGER_TIME_START_MIN: newtime.minute
}]
}
return self._task.set_values(command) | Set start time for task (hh:mm) in iso8601.
NB: dimmer starts 30 mins before time in app |
def create(*context, **kwargs):
"""
Build a ContextStack instance from a sequence of context-like items.
This factory-style method is more general than the ContextStack class's
constructor in that, unlike the constructor, the argument list
can itself contain ContextStack instances.
Here is an example illustrating various aspects of this method:
>>> obj1 = {'animal': 'cat', 'vegetable': 'carrot', 'mineral': 'copper'}
>>> obj2 = ContextStack({'vegetable': 'spinach', 'mineral': 'silver'})
>>>
>>> context = ContextStack.create(obj1, None, obj2, mineral='gold')
>>>
>>> context.get('animal')
'cat'
>>> context.get('vegetable')
'spinach'
>>> context.get('mineral')
'gold'
Arguments:
*context: zero or more dictionaries, ContextStack instances, or objects
with which to populate the initial context stack. None
arguments will be skipped. Items in the *context list are
added to the stack in order so that later items in the argument
list take precedence over earlier items. This behavior is the
same as the constructor's.
**kwargs: additional key-value data to add to the context stack.
As these arguments appear after all items in the *context list,
in the case of key conflicts these values take precedence over
all items in the *context list. This behavior is the same as
the constructor's.
"""
items = context
context = ContextStack()
for item in items:
if item is None:
continue
if isinstance(item, ContextStack):
context._stack.extend(item._stack)
else:
context.push(item)
if kwargs:
context.push(kwargs)
return context | Build a ContextStack instance from a sequence of context-like items.
This factory-style method is more general than the ContextStack class's
constructor in that, unlike the constructor, the argument list
can itself contain ContextStack instances.
Here is an example illustrating various aspects of this method:
>>> obj1 = {'animal': 'cat', 'vegetable': 'carrot', 'mineral': 'copper'}
>>> obj2 = ContextStack({'vegetable': 'spinach', 'mineral': 'silver'})
>>>
>>> context = ContextStack.create(obj1, None, obj2, mineral='gold')
>>>
>>> context.get('animal')
'cat'
>>> context.get('vegetable')
'spinach'
>>> context.get('mineral')
'gold'
Arguments:
*context: zero or more dictionaries, ContextStack instances, or objects
with which to populate the initial context stack. None
arguments will be skipped. Items in the *context list are
added to the stack in order so that later items in the argument
list take precedence over earlier items. This behavior is the
same as the constructor's.
**kwargs: additional key-value data to add to the context stack.
As these arguments appear after all items in the *context list,
in the case of key conflicts these values take precedence over
all items in the *context list. This behavior is the same as
the constructor's. |
def _make_r_patches(data, K_g, critical_r, indices, approx):
'''Helper function for :py:func:`.make_r_gaussmix` and
:py:func:`.make_r_tmix`. Group the ``data`` according to the R value
and split each group into ``K_g`` patches. Return the patch means
and covariances. For details see the docstrings of the above mentioned
functions.
'''
def append_components(means, covs, data, partition):
subdata_start = 0
subdata_stop = partition[0]
for len_subdata in partition:
subdata = data[subdata_start:subdata_stop]
means.append( _np.mean(subdata, axis=0) )
covs.append ( _np.cov (subdata, rowvar=0) )
subdata_start += len_subdata
subdata_stop += len_subdata
n = len(data[0])
for item in data:
assert len(item) == n, 'Every chain must bring the same number of points.'
data = [_np.asarray(d) for d in data]
if indices is None:
# choose all parameters
indices = _np.arange(data[0].shape[1])
assert len(indices) > 0, 'Invalid specification of parameter indices. Need a non-empty iterable, got ' + str(indices)
# select columns of parameters through indices
chain_groups = r_group([_np.mean(chain_values.T[indices], axis=1) for chain_values in data],
[_np.var (chain_values.T[indices], axis=1, ddof=1) for chain_values in data],
n, critical_r, approx)
long_patches_means = []
long_patches_covs = []
for group in chain_groups:
# we want K_g components from k_g = len(group) chains
k_g = len(group)
if K_g >= k_g:
# find minimal lexicographic integer partition
n = _part(K_g, k_g)
for i, chain_index in enumerate(group):
# need to partition in n[i] parts
data_full_chain = data[chain_index]
# find minimal lexicographic integer partition of chain_length into n[i]
this_patch_lengths = _part(len(data_full_chain), n[i])
append_components(long_patches_means, long_patches_covs, data_full_chain, this_patch_lengths)
else:
# form one long chain and set k_g = 1
k_g = 1
# make one large chain
data_full_chain = _np.vstack([data[i] for i in group])
# need to partition into K_g parts -- > minimal lexicographic integer partition
this_patch_lengths = _part(len(data_full_chain), K_g)
append_components(long_patches_means, long_patches_covs, data_full_chain, this_patch_lengths)
return long_patches_means, long_patches_covs | Helper function for :py:func:`.make_r_gaussmix` and
:py:func:`.make_r_tmix`. Group the ``data`` according to the R value
and split each group into ``K_g`` patches. Return the patch means
and covariances. For details see the docstrings of the above mentioned
functions. |
def load(self, dump_fn='', prep_only=0, force_upload=0, from_local=0, name=None, site=None, dest_dir=None):
"""
Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot.
"""
r = self.database_renderer(name=name, site=site)
# Render the snapshot filename.
r.env.dump_fn = self.get_default_db_fn(fn_template=dump_fn, dest_dir=dest_dir)
from_local = int(from_local)
prep_only = int(prep_only)
missing_local_dump_error = r.format('Database dump file {dump_fn} does not exist.')
# Copy snapshot file to target.
if self.is_local:
r.env.remote_dump_fn = dump_fn
else:
r.env.remote_dump_fn = '/tmp/' + os.path.split(r.env.dump_fn)[-1]
if not prep_only and not self.is_local:
if not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
r.pc('Uploading MongoDB database snapshot...')
# r.put(
# local_path=r.env.dump_fn,
# remote_path=r.env.remote_dump_fn)
r.local('rsync -rvz --progress --no-p --no-g '
'--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" '
'{dump_fn} {user}@{host_string}:{remote_dump_fn}')
if self.is_local and not prep_only and not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
r.run_or_local(r.env.load_command) | Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot. |
def enter_eventloop(self):
"""enter eventloop"""
self.log.info("entering eventloop")
# restore default_int_handler
signal(SIGINT, default_int_handler)
while self.eventloop is not None:
try:
self.eventloop(self)
except KeyboardInterrupt:
# Ctrl-C shouldn't crash the kernel
self.log.error("KeyboardInterrupt caught in kernel")
continue
else:
# eventloop exited cleanly, this means we should stop (right?)
self.eventloop = None
break
self.log.info("exiting eventloop")
# if eventloop exits, IOLoop should stop
ioloop.IOLoop.instance().stop() | enter eventloop |
def cleanup_unreachable(rdf):
"""Remove triples which cannot be reached from the concepts by graph
traversal."""
all_subjects = set(rdf.subjects())
logging.debug("total subject resources: %d", len(all_subjects))
reachable = find_reachable(rdf, SKOS.Concept)
nonreachable = all_subjects - reachable
logging.debug("deleting %s non-reachable resources", len(nonreachable))
for subj in nonreachable:
delete_uri(rdf, subj) | Remove triples which cannot be reached from the concepts by graph
traversal. |
def resample_signal(self, data_frame):
"""
Convenience method for frequency conversion and resampling of data frame.
Object must have a DatetimeIndex. After re-sampling, this method interpolates the time magnitude sum
acceleration values and the x,y,z values of the data frame acceleration
:param data_frame: the data frame to resample
:param str sampling_frequency: the sampling frequency. Default is 100Hz, as recommended by the author of the pilot study [1]
"""
new_freq = np.round(1 / self.sampling_frequency, decimals=6)
df_resampled = data_frame.resample(str(new_freq) + 'S').mean()
# f = interpolate.interp1d(data_frame.td, data_frame.mag_sum_acc)
# new_timestamp = np.arange(data_frame.td[0], data_frame.td[-1], 1.0 / self.sampling_frequency)
# df_resampled.mag_sum_acc = f(new_timestamp)
logging.debug("resample signal")
df_resampled = df_resampled.interpolate(method='linear')
get_sampling_rate_from_timestamp(df_resampled)
# df_resampled['td'] = df_resampled.index - df_resampled.index[0]
return df_resampled | Convenience method for frequency conversion and resampling of data frame.
Object must have a DatetimeIndex. After re-sampling, this method interpolates the time magnitude sum
acceleration values and the x,y,z values of the data frame acceleration
:param data_frame: the data frame to resample
:param str sampling_frequency: the sampling frequency. Default is 100Hz, as recommended by the author of the pilot study [1] |
def _stripslashes(s):
'''Removes trailing and leading backslashes from string'''
r = re.sub(r"\\(n|r)", "\n", s)
r = re.sub(r"\\", "", r)
return r | Removes trailing and leading backslashes from string |
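A quick illustrative call, assuming _stripslashes (and its re import) are in scope; note that the body actually turns literal \n / \r escape sequences into newlines before dropping any remaining backslashes.
s = r"\hello\nworld\\"      # backslash-escaped input
print(_stripslashes(s))     # prints "hello", a newline, then "world": \n became a real newline,
                            # and every other backslash was stripped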
def _set_status(self, status, message=''):
""" Updates the status and message on all supported IM apps.
`status`
Status type (See ``VALID_STATUSES``).
`message`
Status message.
"""
message = message.strip()
# fetch away message from provided id
if message.startswith(':'):
msg_id = message[1:]
message = self.messages.get(msg_id, '')
message = message.encode('utf-8', 'replace')
# attempt to set status for each supported application
for func in self.set_status_funcs:
func(status, message) | Updates the status and message on all supported IM apps.
`status`
Status type (See ``VALID_STATUSES``).
`message`
Status message. |
def from_bytes(cls, b):
"""Create an APNG from raw bytes.
:arg bytes b: The raw bytes of the APNG file.
:rtype: APNG
"""
hdr = None
head_chunks = []
end = ("IEND", make_chunk("IEND", b""))
frame_chunks = []
frames = []
num_plays = 0
frame_has_head_chunks = False
control = None
for type_, data in parse_chunks(b):
if type_ == "IHDR":
hdr = data
frame_chunks.append((type_, data))
elif type_ == "acTL":
_num_frames, num_plays = struct.unpack("!II", data[8:-4])
continue
elif type_ == "fcTL":
if any(type_ == "IDAT" for type_, data in frame_chunks):
# IDAT inside chunk, go to next frame
frame_chunks.append(end)
frames.append((PNG.from_chunks(frame_chunks), control))
frame_has_head_chunks = False
control = FrameControl.from_bytes(data[12:-4])
# https://github.com/PyCQA/pylint/issues/2072
# pylint: disable=typecheck
hdr = make_chunk("IHDR", struct.pack("!II", control.width, control.height) + hdr[16:-4])
frame_chunks = [("IHDR", hdr)]
else:
control = FrameControl.from_bytes(data[12:-4])
elif type_ == "IDAT":
if not frame_has_head_chunks:
frame_chunks.extend(head_chunks)
frame_has_head_chunks = True
frame_chunks.append((type_, data))
elif type_ == "fdAT":
# convert to IDAT
if not frame_has_head_chunks:
frame_chunks.extend(head_chunks)
frame_has_head_chunks = True
frame_chunks.append(("IDAT", make_chunk("IDAT", data[12:-4])))
elif type_ == "IEND":
# end
frame_chunks.append(end)
frames.append((PNG.from_chunks(frame_chunks), control))
break
elif type_ in CHUNK_BEFORE_IDAT:
head_chunks.append((type_, data))
else:
frame_chunks.append((type_, data))
o = cls()
o.frames = frames
o.num_plays = num_plays
return o | Create an APNG from raw bytes.
:arg bytes b: The raw bytes of the APNG file.
:rtype: APNG |
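A short usage sketch for the classmethod above; the file name is hypothetical and this assumes the surrounding APNG/PNG/FrameControl classes from the same module.
with open("animation.png", "rb") as fh:   # hypothetical APNG file
    im = APNG.from_bytes(fh.read())
print(im.num_plays, len(im.frames))       # loop count and number of (PNG, FrameControl) pairs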
def is_same(type1, type2):
"""returns True, if type1 and type2 are same types"""
nake_type1 = remove_declarated(type1)
nake_type2 = remove_declarated(type2)
return nake_type1 == nake_type2 | returns True, if type1 and type2 are same types |
def _get_log_model_class(self):
"""Cache for fetching the actual log model object once django is loaded.
Otherwise, import conflicts occur: WorkflowEnabled imports <log_model>
which tries to import all models to retrieve the proper model class.
"""
if self.log_model_class is not None:
return self.log_model_class
app_label, model_label = self.log_model.rsplit('.', 1)
self.log_model_class = apps.get_model(app_label, model_label)
return self.log_model_class | Cache for fetching the actual log model object once django is loaded.
Otherwise, import conflicts occur: WorkflowEnabled imports <log_model>
which tries to import all models to retrieve the proper model class. |
def shelter_listbybreed(self, **kwargs):
"""
shelter.listByBreed wrapper. Given a breed and an animal type, list
the shelter IDs with pets of said breed.
:rtype: generator
:returns: A generator of shelter IDs that have breed matches.
"""
root = self._do_api_call("shelter.listByBreed", kwargs)
shelter_ids = root.findall("shelterIds/id")
for shelter_id in shelter_ids:
yield shelter_id.text | shelter.listByBreed wrapper. Given a breed and an animal type, list
the shelter IDs with pets of said breed.
:rtype: generator
:returns: A generator of shelter IDs that have breed matches. |
def move(zone, zonepath):
'''
Move zone to new zonepath.
zone : string
name or uuid of the zone
zonepath : string
new zonepath
CLI Example:
.. code-block:: bash
salt '*' zoneadm.move meave /sweetwater/meave
'''
ret = {'status': True}
## verify zone
res = __salt__['cmd.run_all']('zoneadm {zone} move {path}'.format(
zone='-u {0}'.format(zone) if _is_uuid(zone) else '-z {0}'.format(zone),
path=zonepath,
))
ret['status'] = res['retcode'] == 0
ret['message'] = res['stdout'] if ret['status'] else res['stderr']
ret['message'] = ret['message'].replace('zoneadm: ', '')
if ret['message'] == '':
del ret['message']
return ret | Move zone to new zonepath.
zone : string
name or uuid of the zone
zonepath : string
new zonepath
CLI Example:
.. code-block:: bash
salt '*' zoneadm.move meave /sweetwater/meave |
def get_profile(session):
"""Get profile data."""
response = session.get(PROFILE_URL, allow_redirects=False)
if response.status_code == 302:
raise USPSError('expired session')
parsed = BeautifulSoup(response.text, HTML_PARSER)
profile = parsed.find('div', {'class': 'atg_store_myProfileInfo'})
data = {}
for row in profile.find_all('tr'):
cells = row.find_all('td')
if len(cells) == 2:
key = ' '.join(cells[0].find_all(text=True)).strip().lower().replace(' ', '_')
value = ' '.join(cells[1].find_all(text=True)).strip()
data[key] = value
return data | Get profile data. |
def add_interactions_from(self, ebunch, t=None, e=None):
"""Add all the interaction in ebunch at time t.
Parameters
----------
ebunch : container of interaction
Each interaction given in the container will be added to the
graph. The interaction must be given as as 2-tuples (u,v) or
3-tuples (u,v,d) where d is a dictionary containing interaction
data.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional
See Also
--------
add_edge : add a single interaction
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_edges_from([(0,1),(1,2)], t=0)
"""
# set up attribute dict
if t is None:
raise nx.NetworkXError(
"The t argument must be specified.")
# process ebunch
for ed in ebunch:
self.add_interaction(ed[0], ed[1], t, e) | Add all the interaction in ebunch at time t.
Parameters
----------
ebunch : container of interaction
Each interaction given in the container will be added to the
graph. The interaction must be given as as 2-tuples (u,v) or
3-tuples (u,v,d) where d is a dictionary containing interaction
data.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional
See Also
--------
add_edge : add a single interaction
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_edges_from([(0,1),(1,2)], t=0) |
def imagecapture(self, window_name=None, x=0, y=0,
width=None, height=None):
"""
Captures screenshot of the whole desktop or given window
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param x: x co-ordinate value
@type x: int
@param y: y co-ordinate value
@type y: int
@param width: width co-ordinate value
@type width: int
@param height: height co-ordinate value
@type height: int
@return: screenshot with base64 encoded for the client
@rtype: string
"""
if x or y or (width and width != -1) or (height and height != -1):
raise LdtpServerException("Not implemented")
if window_name:
handle, name, app = self._get_window_handle(window_name)
try:
self._grabfocus(handle)
except:
pass
rect = self._getobjectsize(handle)
screenshot = CGWindowListCreateImage(NSMakeRect(rect[0],
rect[1], rect[2], rect[3]), 1, 0, 0)
else:
screenshot = CGWindowListCreateImage(CGRectInfinite, 1, 0, 0)
image = CIImage.imageWithCGImage_(screenshot)
bitmapRep = NSBitmapImageRep.alloc().initWithCIImage_(image)
blob = bitmapRep.representationUsingType_properties_(NSPNGFileType, None)
tmpFile = tempfile.mktemp('.png', 'ldtpd_')
blob.writeToFile_atomically_(tmpFile, False)
rv = b64encode(open(tmpFile).read())
os.remove(tmpFile)
return rv | Captures screenshot of the whole desktop or given window
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param x: x co-ordinate value
@type x: int
@param y: y co-ordinate value
@type y: int
@param width: width co-ordinate value
@type width: int
@param height: height co-ordinate value
@type height: int
@return: screenshot with base64 encoded for the client
@rtype: string |
def set_stack_address_mapping(self, absolute_address, region_id, related_function_address=None):
"""
Create a new mapping between an absolute address (which is the base address of a specific stack frame) and a
region ID.
:param absolute_address: The absolute memory address.
:param region_id: The region ID.
:param related_function_address: Related function address.
"""
if self._stack_region_map is None:
raise SimMemoryError('Stack region map is not initialized.')
self._stack_region_map.map(absolute_address, region_id, related_function_address=related_function_address) | Create a new mapping between an absolute address (which is the base address of a specific stack frame) and a
region ID.
:param absolute_address: The absolute memory address.
:param region_id: The region ID.
:param related_function_address: Related function address. |
def s15f16l(s):
"""Convert sequence of ICC s15Fixed16 to list of float."""
# Note: As long as float has at least 32 bits of mantissa, all
# values are preserved.
n = len(s) // 4
t = struct.unpack('>%dl' % n, s)
return map((2**-16).__mul__, t) | Convert sequence of ICC s15Fixed16 to list of float. |
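A small worked example, assuming the function above is in scope: three big-endian s15Fixed16 values (raw integer / 65536) decoded back to floats.
import struct

raw = struct.pack('>3l', 65536, 32768, -65536)   # 1.0, 0.5, -1.0 in s15Fixed16
print(list(s15f16l(raw)))                        # [1.0, 0.5, -1.0]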
def _get(self, key, what):
"""Generic getter magic method.
The node with the nearest but not less hash value is returned.
:param key: the key to look for.
:param what: the information to look for in, allowed values:
- instance (default): associated node instance
- nodename: node name
- pos: index of the given key in the ring
- tuple: ketama compatible (pos, name) tuple
- weight: node weight
"""
if not self.runtime._ring:
return None
pos = self._get_pos(key)
if what == 'pos':
return pos
nodename = self.runtime._ring[self.runtime._keys[pos]]
if what in ['hostname', 'instance', 'port', 'weight']:
return self.runtime._nodes[nodename][what]
elif what == 'dict':
return self.runtime._nodes[nodename]
elif what == 'nodename':
return nodename
elif what == 'tuple':
return (self.runtime._keys[pos], nodename) | Generic getter magic method.
The node with the nearest but not less hash value is returned.
:param key: the key to look for.
:param what: the information to look for in, allowed values:
- instance (default): associated node instance
- nodename: node name
- pos: index of the given key in the ring
- tuple: ketama compatible (pos, name) tuple
- weight: node weight |
def checkBinary(name, bindir=None):
"""
Checks for the given binary in the places, defined by the environment
variables SUMO_HOME and <NAME>_BINARY.
"""
if name == "sumo-gui":
envName = "GUISIM_BINARY"
else:
envName = name.upper() + "_BINARY"
env = os.environ
join = os.path.join
if envName in env and exeExists(env.get(envName)):
return env.get(envName)
if bindir is not None:
binary = join(bindir, name)
if exeExists(binary):
return binary
if "SUMO_HOME" in env:
binary = join(env.get("SUMO_HOME"), "bin", name)
if exeExists(binary):
return binary
binary = os.path.abspath(
join(os.path.dirname(__file__), '..', '..', 'bin', name))
if exeExists(binary):
return binary
return name | Checks for the given binary in the places, defined by the environment
variables SUMO_HOME and <NAME>_BINARY. |
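A minimal usage sketch, assuming checkBinary above is importable and SUMO_HOME (or the per-binary environment variable) points at a SUMO install; otherwise the bare name is returned for PATH lookup.
sumo_binary = checkBinary("sumo")      # honours SUMO_BINARY, then SUMO_HOME/bin, then a local bin dir
gui_binary = checkBinary("sumo-gui")   # special-cased to the GUISIM_BINARY variable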
def OnPadIntCtrl(self, event):
"""Pad IntCtrl event handler"""
self.attrs["pad"] = event.GetValue()
post_command_event(self, self.DrawChartMsg) | Pad IntCtrl event handler |
def _get_remote(self, cached=True):
'''
Helper function to determine remote
:param cached:
Use cached values or query remotes
'''
return self.m(
'getting current remote',
cmdd=dict(
cmd='git remote show %s' % ('-n' if cached else ''),
cwd=self.local
),
verbose=False
) | Helper function to determine remote
:param cached:
Use cached values or query remotes |
def from_conll(this_class, stream):
"""Construct a Corpus. stream is an iterable over strings where
each string is a line in CoNLL-X format."""
stream = iter(stream)
corpus = this_class()
while 1:
# read until we get an empty sentence
sentence = Sentence.from_conll(stream)
if sentence:
corpus.append(sentence)
else:
break
return corpus | Construct a Corpus. stream is an iterable over strings where
each string is a line in CoNLL-X format. |
def remove(path):
'''Remove a cached environment. Removed paths will no longer be able to
be activated by name'''
r = cpenv.resolve(path)
if isinstance(r.resolved[0], cpenv.VirtualEnvironment):
EnvironmentCache.discard(r.resolved[0])
EnvironmentCache.save() | Remove a cached environment. Removed paths will no longer be able to
be activated by name |
def sample_slice(args):
"""
Return a new live point proposed by a series of random slices
away from an existing live point. Standard "Gibbs-like" implementation where
a single multivariate "slice" is a combination of `ndim` univariate slices
through each axis.
Parameters
----------
u : `~numpy.ndarray` with shape (npdim,)
Position of the initial sample. **This is a copy of an existing live
point.**
loglstar : float
Ln(likelihood) bound.
axes : `~numpy.ndarray` with shape (ndim, ndim)
Axes used to propose new points. For slices new positions are
proposed along the orthogonal basis defined by :data:`axes`.
scale : float
Value used to scale the provided axes.
prior_transform : function
Function transforming a sample from the unit cube to the parameter
space of interest according to the prior.
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
kwargs : dict
A dictionary of additional method-specific parameters.
Returns
-------
u : `~numpy.ndarray` with shape (npdim,)
Position of the final proposed point within the unit cube.
v : `~numpy.ndarray` with shape (ndim,)
Position of the final proposed point in the target parameter space.
logl : float
Ln(likelihood) of the final proposed point.
nc : int
Number of function calls used to generate the sample.
blob : dict
Collection of ancillary quantities used to tune :data:`scale`.
"""
# Unzipping.
(u, loglstar, axes, scale,
prior_transform, loglikelihood, kwargs) = args
rstate = np.random
# Periodicity.
nonperiodic = kwargs.get('nonperiodic', None)
# Setup.
n = len(u)
slices = kwargs.get('slices', 5) # number of slices
nc = 0
nexpand = 0
ncontract = 0
fscale = []
# Modifying axes and computing lengths.
axes = scale * axes.T # scale based on past tuning
axlens = [linalg.norm(axis) for axis in axes]
# Slice sampling loop.
for it in range(slices):
# Shuffle axis update order.
idxs = np.arange(n)
rstate.shuffle(idxs)
# Slice sample along a random direction.
for idx in idxs:
# Select axis.
axis = axes[idx]
axlen = axlens[idx]
# Define starting "window".
r = rstate.rand() # initial scale/offset
u_l = u - r * axis # left bound
if unitcheck(u_l, nonperiodic):
v_l = prior_transform(np.array(u_l))
logl_l = loglikelihood(np.array(v_l))
else:
logl_l = -np.inf
nc += 1
nexpand += 1
u_r = u + (1 - r) * axis # right bound
if unitcheck(u_r, nonperiodic):
v_r = prior_transform(np.array(u_r))
logl_r = loglikelihood(np.array(v_r))
else:
logl_r = -np.inf
nc += 1
nexpand += 1
# "Stepping out" the left and right bounds.
while logl_l >= loglstar:
u_l -= axis
if unitcheck(u_l, nonperiodic):
v_l = prior_transform(np.array(u_l))
logl_l = loglikelihood(np.array(v_l))
else:
logl_l = -np.inf
nc += 1
nexpand += 1
while logl_r >= loglstar:
u_r += axis
if unitcheck(u_r, nonperiodic):
v_r = prior_transform(np.array(u_r))
logl_r = loglikelihood(np.array(v_r))
else:
logl_r = -np.inf
nc += 1
nexpand += 1
# Sample within limits. If the sample is not valid, shrink
# the limits until we hit the `loglstar` bound.
while True:
u_hat = u_r - u_l
u_prop = u_l + rstate.rand() * u_hat # scale from left
if unitcheck(u_prop, nonperiodic):
v_prop = prior_transform(np.array(u_prop))
logl_prop = loglikelihood(np.array(v_prop))
else:
logl_prop = -np.inf
nc += 1
ncontract += 1
# If we succeed, move to the new position.
if logl_prop >= loglstar:
window = linalg.norm(u_hat) # length of window
fscale.append(window / axlen)
u = u_prop
break
# If we fail, check if the new point is to the left/right of
# our original point along our proposal axis and update
# the bounds accordingly.
else:
s = np.dot(u_prop - u, u_hat) # check sign (+/-)
if s < 0: # left
u_l = u_prop
elif s > 0: # right
u_r = u_prop
else:
raise RuntimeError("Slice sampler has failed to find "
"a valid point. Some useful "
"output quantities:\n"
"u: {0}\n"
"u_left: {1}\n"
"u_right: {2}\n"
"u_hat: {3}\n"
"u_prop: {4}\n"
"loglstar: {5}\n"
"logl_prop: {6}\n"
"axes: {7}\n"
"axlens: {8}\n"
"s: {9}."
.format(u, u_l, u_r, u_hat, u_prop,
loglstar, logl_prop,
axes, axlens, s))
blob = {'fscale': np.mean(fscale),
'nexpand': nexpand, 'ncontract': ncontract}
return u_prop, v_prop, logl_prop, nc, blob | Return a new live point proposed by a series of random slices
away from an existing live point. Standard "Gibs-like" implementation where
a single multivariate "slice" is a combination of `ndim` univariate slices
through each axis.
Parameters
----------
u : `~numpy.ndarray` with shape (npdim,)
Position of the initial sample. **This is a copy of an existing live
point.**
loglstar : float
Ln(likelihood) bound.
axes : `~numpy.ndarray` with shape (ndim, ndim)
Axes used to propose new points. For slices new positions are
proposed along the orthogonal basis defined by :data:`axes`.
scale : float
Value used to scale the provided axes.
prior_transform : function
Function transforming a sample from the unit cube to the parameter
space of interest according to the prior.
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
kwargs : dict
A dictionary of additional method-specific parameters.
Returns
-------
u : `~numpy.ndarray` with shape (npdim,)
Position of the final proposed point within the unit cube.
v : `~numpy.ndarray` with shape (ndim,)
Position of the final proposed point in the target parameter space.
logl : float
Ln(likelihood) of the final proposed point.
nc : int
Number of function calls used to generate the sample.
blob : dict
Collection of ancillary quantities used to tune :data:`scale`. |
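A hedged driver for the sampler above, using a made-up 2-D problem (uniform prior on the unit square, isotropic Gaussian log-likelihood); it assumes sample_slice and its module-level helpers (unitcheck, linalg, np) are importable as-is.
import numpy as np

def prior_transform(u):
    return u                                   # identity: unit cube is the parameter space

def loglikelihood(v):
    return -0.5 * np.sum(((v - 0.5) / 0.1) ** 2)

u0 = np.array([0.5, 0.5])                      # copy of an existing live point
loglstar = loglikelihood(u0) - 1.0             # ln(likelihood) bound to stay above
axes = np.eye(2)                               # orthogonal proposal basis
args = (u0, loglstar, axes, 0.1, prior_transform, loglikelihood, {'slices': 5})
u_new, v_new, logl_new, ncall, blob = sample_slice(args)
print(logl_new >= loglstar, blob['nexpand'], blob['ncontract'])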
def _execute(self, cursor, statements):
""""
Executes a list of statements, returning an iterator of results sets. Each
statement should be a tuple of (statement, params).
"""
payload = [{'statement': s, 'parameters': p, 'resultDataContents':['rest']} for (s, p) in statements]
http_response = self._http_req("POST", self._tx, {'statements': payload})
if self._tx == TX_ENDPOINT:
self._tx = http_response.getheader('Location')
response = self._deserialize(http_response)
self._handle_errors(response, cursor, cursor)
return response['results'][-1] | Executes a list of statements, returning an iterator of results sets. Each
statement should be a tuple of (statement, params). |
def list(self, search_opts=None):
"""Get a list of Plugins."""
query = base.get_query_string(search_opts)
return self._list('/plugins%s' % query, 'plugins') | Get a list of Plugins. |
def get_ball_by_ball(self, match_key, over_key=None):
"""
match_key: key of the match
over_key : key of the over
Return:
json data:
"""
if over_key:
ball_by_ball_url = "{base_path}match/{match_key}/balls/{over_key}/".format(base_path=self.api_path, match_key=match_key, over_key=over_key)
else:
ball_by_ball_url = "{base_path}match/{match_key}/balls/".format(base_path=self.api_path, match_key=match_key)
response = self.get_response(ball_by_ball_url)
return response | match_key: key of the match
over_key : key of the over
Return:
json data: |
def check_applied(result):
"""
Raises LWTException if it looks like a failed LWT request. A LWTException
won't be raised in the special case in which there are several failed LWT
in a :class:`~cqlengine.query.BatchQuery`.
"""
try:
applied = result.was_applied
except Exception:
applied = True # result was not LWT form
if not applied:
raise LWTException(result.one()) | Raises LWTException if it looks like a failed LWT request. A LWTException
won't be raised in the special case in which there are several failed LWT
in a :class:`~cqlengine.query.BatchQuery`. |
def updateSocialTone(user, socialTone, maintainHistory):
"""
updateSocialTone updates the user with the social tones interpreted based on
the specified thresholds
@param user a json object representing user information (tone) to be used in
conversing with the Conversation Service
@param socialTone a json object containing the social tones in the payload
returned by the Tone Analyzer
"""
currentSocial = []
currentSocialObject = []
# Process each social tone and determine if it is high or low
for tone in socialTone['tones']:
if tone['score'] >= SOCIAL_HIGH_SCORE_THRESHOLD:
currentSocial.append(tone['tone_name'].lower() + '_high')
currentSocialObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'likely high'
})
elif tone['score'] <= SOCIAL_LOW_SCORE_THRESHOLD:
currentSocial.append(tone['tone_name'].lower() + '_low')
currentSocialObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'likely low'
})
else:
currentSocialObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'likely medium'
})
# update user social tone
user['tone']['social']['current'] = currentSocial
if maintainHistory:
if not user['tone']['social']['current']:
user['tone']['social']['current'] = []
user['tone']['social']['current'].append(currentSocialObject) | updateSocialTone updates the user with the social tones interpreted based on
the specified thresholds
@param user a json object representing user information (tone) to be used in
conversing with the Conversation Service
@param socialTone a json object containing the social tones in the payload
returned by the Tone Analyzer |
def _set_system_description(self, v, load=False):
"""
Setter method for system_description, mapped from YANG variable /protocol/lldp/system_description (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_system_description is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_system_description() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..50']}), is_leaf=True, yang_name="system-description", rest_name="system-description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-multi-value': None, u'info': u'The System Description.'}}, namespace='urn:brocade.com:mgmt:brocade-lldp', defining_module='brocade-lldp', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """system_description must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..50']}), is_leaf=True, yang_name="system-description", rest_name="system-description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-multi-value': None, u'info': u'The System Description.'}}, namespace='urn:brocade.com:mgmt:brocade-lldp', defining_module='brocade-lldp', yang_type='string', is_config=True)""",
})
self.__system_description = t
if hasattr(self, '_set'):
self._set() | Setter method for system_description, mapped from YANG variable /protocol/lldp/system_description (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_system_description is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_system_description() directly. |
def mode(data):
"""Compute an intelligent value for the mode
The most common value in experimental data is not very useful if there
are a lot of digits after the comma. This method approaches this
issue by rounding to bin size that is determined by the
Freedman–Diaconis rule.
Parameters
----------
data: 1d ndarray
The data for which the mode should be computed.
Returns
-------
mode: float
The mode computed with the Freedman-Diaconis rule.
"""
# size
n = data.shape[0]
# interquartile range
iqr = np.percentile(data, 75)-np.percentile(data, 25)
# Freedman–Diaconis
bin_size = 2 * iqr / n**(1/3)
if bin_size == 0:
return np.nan
# Add bin_size/2, because we want the center of the bin and
# not the left corner of the bin.
databin = np.round(data/bin_size)*bin_size + bin_size/2
u, indices = np.unique(databin, return_inverse=True)
mode = u[np.argmax(np.bincount(indices))]
return mode | Compute an intelligent value for the mode
The most common value in experimental data is not very useful if there
are a lot of digits after the comma. This method approaches this
issue by rounding to bin size that is determined by the
Freedman–Diaconis rule.
Parameters
----------
data: 1d ndarray
The data for which the mode should be computed.
Returns
-------
mode: float
The mode computed with the Freedman-Diaconis rule. |
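An illustrative call, assuming the function above and its numpy import are available; the data below pile 50 identical values onto a uniform spread, so the Freedman-Diaconis binning should put the mode very close to 1.0.
import numpy as np

data = np.concatenate([np.full(50, 1.0), np.linspace(0, 2, 50)])
print(mode(data))   # expected to land near 1.0 (the exact value depends on the computed bin size)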
def add_enclave_tag(self, report_id, name, enclave_id, id_type=None):
"""
Adds a tag to a specific report, for a specific enclave.
:param report_id: The ID of the report
:param name: The name of the tag to be added
:param enclave_id: ID of the enclave where the tag will be added
:param id_type: indicates whether the ID is internal or an external ID provided by the user
:return: The ID of the tag that was created.
"""
params = {
'idType': id_type,
'name': name,
'enclaveId': enclave_id
}
resp = self._client.post("reports/%s/tags" % report_id, params=params)
return str(resp.content) | Adds a tag to a specific report, for a specific enclave.
:param report_id: The ID of the report
:param name: The name of the tag to be added
:param enclave_id: ID of the enclave where the tag will be added
:param id_type: indicates whether the ID is internal or an external ID provided by the user
:return: The ID of the tag that was created. |
def primary_avatar(user, size=AVATAR_DEFAULT_SIZE):
"""
This tag tries to get the default avatar for a user without doing any db
requests. It achieves this by linking to a special view that will do all the
work for us. If that special view is then cached by a CDN for instance,
we will avoid many db calls.
"""
alt = unicode(user)
url = reverse('avatar_render_primary', kwargs={'user' : user, 'size' : size})
return """<img src="%s" alt="%s" />""" % (url, alt,
) | This tag tries to get the default avatar for a user without doing any db
requests. It achieves this by linking to a special view that will do all the
work for us. If that special view is then cached by a CDN for instance,
we will avoid many db calls. |
def delete(self, personId):
"""Remove a person from the system.
Only an admin can remove a person.
Args:
personId(basestring): The ID of the person to be deleted.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(personId, basestring, may_be_none=False)
# API request
self._session.delete(API_ENDPOINT + '/' + personId) | Remove a person from the system.
Only an admin can remove a person.
Args:
personId(basestring): The ID of the person to be deleted.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error. |
def parse_args(self):
"""Parse CLI args."""
self.tcex.log.info('Parsing Args.')
Args(self.tcex.parser)
self.args = self.tcex.args | Parse CLI args. |
def send_data_to_server(self, data, time_out=5):
"""
Sends given data to the Server.
:param data: Data to send.
:type data: unicode
:param time_out: Connection timeout in seconds.
:type time_out: float
:return: Method success.
:rtype: bool
"""
if not data.endswith(self.__connection_end):
data = "{0}{1}".format(data, foundations.strings.to_string(self.__connection_end).decode("string_escape"))
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.settimeout(time_out)
connection.connect((foundations.strings.to_string(self.__address), int(self.__port)))
connection.send(data)
self.__engine.notifications_manager.notify(
"{0} | Socket connection command dispatched!".format(self.__class__.__name__))
connection.close()
return True | Sends given data to the Server.
:param data: Data to send.
:type data: unicode
:param time_out: Connection timeout in seconds.
:type time_out: float
:return: Method success.
:rtype: bool |
def terminate(self):
'''Kills the work unit.
This is called by the standard worker system, but only in
response to an operating system signal. If the job does setup
such as creating a child process, its terminate function
should kill that child process. More specifically, this
function requires the work spec to contain the keys
``module``, ``run_function``, and ``terminate_function``, and
calls ``terminate_function`` in :attr:`module` containing
:const:`self` as its only parameter.
'''
terminate_function_name = self.spec.get('terminate_function')
if not terminate_function_name:
logger.error('tried to terminate WorkUnit(%r) but no '
'function name', self.key)
return None
terminate_function = getattr(self.module,
self.spec['terminate_function'])
if not terminate_function:
logger.error('tried to terminate WorkUnit(%r) but no '
'function %s in module %r',
self.key, terminate_function_name,
self.module.__name__)
return None
logger.info('calling terminate function for work unit {0}'
.format(self.key))
ret_val = terminate_function(self)
self.update(lease_time=-10)
return ret_val | Kills the work unit.
This is called by the standard worker system, but only in
response to an operating system signal. If the job does setup
such as creating a child process, its terminate function
should kill that child process. More specifically, this
function requires the work spec to contain the keys
``module``, ``run_function``, and ``terminate_function``, and
calls ``terminate_function`` in :attr:`module` containing
:const:`self` as its only parameter. |
def transformer_moe_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.norm_type = "layer"
hparams.hidden_size = 512
hparams.batch_size = 4096
hparams.max_length = 2001
hparams.max_input_seq_length = 2000
hparams.max_target_seq_length = 2000
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 2000
hparams.initializer_gain = 1.0
hparams.num_hidden_layers = 5
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.num_sampled_classes = 0
hparams.label_smoothing = 0.0
hparams.shared_embedding_and_softmax_weights = True
# According to noam, ("n", "da") seems better for harder-to-learn models
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
# Hparams used by transformer_prepare_decoder() function
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("proximity_bias", False)
hparams.add_hparam("causal_decoder_self_attention", True)
hparams = common_attention.add_standard_attention_hparams(hparams)
# Decoder layers type. If set, num_decoder_layers parameter will be ignored
# and the number of decoder layer will be deduced from the string
# See top file comment for example of usage
hparams.add_hparam("layer_types", "")
# Default attention type (ex: a, loc, red,...) and feed-forward type (ex: fc,
# sep, moe,...)
hparams.add_hparam("default_att", "a")
hparams.add_hparam("default_ff", "fc")
return hparams | Set of hyperparameters. |
def field_value(key, label, color, padding):
"""
Print a specific field's stats.
"""
if not clr.has_colors and padding > 0:
padding = 7
if color == "bright gray" or color == "dark gray":
bright_prefix = ""
else:
bright_prefix = "bright "
field = clr.stringc(key, "{0}{1}".format(bright_prefix, color))
field_label = clr.stringc(label, color)
return "{0:>{1}} {2}".format(field, padding, field_label) | Print a specific field's stats. |
def write_configuration(self, out, secret_attrs=False):
"""Generic configuration, may be overridden by type-specific version"""
key_order = ['name', 'path', 'git_dir', 'doc_dir', 'assumed_doc_version',
'git_ssh', 'pkey', 'has_aliases', 'number of collections']
cd = self.get_configuration_dict(secret_attrs=secret_attrs)
for k in key_order:
if k in cd:
out.write(' {} = {}'.format(k, cd[k]))
out.write(' collections in alias groups:\n')
for o in cd['collections']:
out.write(' {} ==> {}\n'.format(o['keys'], o['relpath'])) | Generic configuration, may be overridden by type-specific version |
def execute_cleanup_tasks(ctx, cleanup_tasks, dry_run=False):
"""Execute several cleanup tasks as part of the cleanup.
REQUIRES: ``clean(ctx, dry_run=False)`` signature in cleanup tasks.
:param ctx: Context object for the tasks.
:param cleanup_tasks: Collection of cleanup tasks (as Collection).
:param dry_run: Indicates dry-run mode (bool)
"""
# pylint: disable=redefined-outer-name
executor = Executor(cleanup_tasks, ctx.config)
for cleanup_task in cleanup_tasks.tasks:
print("CLEANUP TASK: %s" % cleanup_task)
executor.execute((cleanup_task, dict(dry_run=dry_run))) | Execute several cleanup tasks as part of the cleanup.
REQUIRES: ``clean(ctx, dry_run=False)`` signature in cleanup tasks.
:param ctx: Context object for the tasks.
:param cleanup_tasks: Collection of cleanup tasks (as Collection).
:param dry_run: Indicates dry-run mode (bool) |
def make_transformer(self, decompose='svd', decompose_by=50, tsne_kwargs={}):
"""
Creates an internal transformer pipeline to project the data set into
2D space using TSNE, applying a pre-decomposition technique ahead of
embedding if necessary. This method will reset the transformer on the
class, and can be used to explore different decompositions.
Parameters
----------
decompose : string or None, default: ``'svd'``
A preliminary decomposition is often used prior to TSNE to make
the projection faster. Specify ``"svd"`` for sparse data or ``"pca"``
for dense data. If decompose is None, the original data set will
be used.
decompose_by : int, default: 50
Specify the number of components for preliminary decomposition, by
default this is 50; the more components, the slower TSNE will be.
Returns
-------
transformer : Pipeline
Pipelined transformer for TSNE projections
"""
# TODO: detect decompose by inferring from sparse matrix or dense or
# If number of features > 50 etc.
decompositions = {
'svd': TruncatedSVD,
'pca': PCA,
}
if decompose and decompose.lower() not in decompositions:
raise YellowbrickValueError(
"'{}' is not a valid decomposition, use {}, or None".format(
decompose, ", ".join(decompositions.keys())
)
)
# Create the pipeline steps
steps = []
# Add the pre-decomposition
if decompose:
klass = decompositions[decompose]
steps.append((decompose, klass(
n_components=decompose_by, random_state=self.random_state)))
# Add the TSNE manifold
steps.append(('tsne', TSNE(
n_components=2, random_state=self.random_state, **tsne_kwargs)))
# return the pipeline
return Pipeline(steps) | Creates an internal transformer pipeline to project the data set into
2D space using TSNE, applying an pre-decomposition technique ahead of
embedding if necessary. This method will reset the transformer on the
class, and can be used to explore different decompositions.
Parameters
----------
decompose : string or None, default: ``'svd'``
A preliminary decomposition is often used prior to TSNE to make
the projection faster. Specify ``"svd"`` for sparse data or ``"pca"``
for dense data. If decompose is None, the original data set will
be used.
decompose_by : int, default: 50
Specify the number of components for preliminary decomposition, by
default this is 50; the more components, the slower TSNE will be.
Returns
-------
transformer : Pipeline
Pipelined transformer for TSNE projections |
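A standalone sketch of the pipeline this method assembles for its default arguments; the imports and the random_state value are assumptions, not part of the original class.
from sklearn.pipeline import Pipeline
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE

transformer = Pipeline([
    ('svd', TruncatedSVD(n_components=50, random_state=42)),   # preliminary decomposition
    ('tsne', TSNE(n_components=2, random_state=42)),           # 2D embedding
])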