def render_template(self, template_name, out_path=None):
"""Render a template based on this TileBus Block.
The template has access to all of the attributes of this block as a
dictionary (the result of calling self.to_dict()).
You can optionally render to a file by passing out_path.
Args:
template_name (str): The name of the template to load. This must
be a file in config/templates inside this package
out_path (str): An optional path of where to save the output
file, otherwise it is just returned as a string.
Returns:
string: The rendered template data.
"""
return render_template(template_name, self.to_dict(), out_path=out_path)
def files(self):
"""List of Phasics tif file names in the input zip file"""
if self._files is None:
self._files = SeriesZipTifPhasics._index_files(self.path)
return self._files
def discover_single_case(self, module, case_attributes):
"""Find and load a single TestCase or TestCase method from a module.
Parameters
----------
module : module
The imported Python module containing the TestCase to be
loaded.
case_attributes : list
A list (length 1 or 2) of str. The first component must be
the name of a TestCase subclass. The second component must
be the name of a method in the TestCase.
"""
# Find single case
case = module
loader = self._loader
for index, component in enumerate(case_attributes):
case = getattr(case, component, None)
if case is None:
return loader.create_suite()
elif loader.is_test_case(case):
rest = case_attributes[index + 1:]
if len(rest) > 1:
raise ValueError('Too many components in module path')
elif len(rest) == 1:
return loader.create_suite(
[loader.load_test(case, *rest)])
return loader.load_case(case)
# No cases matched, return empty suite
return loader.create_suite()
def _get_mean_deep_soil(self, mag, rake, rrup, is_reverse, imt):
"""
Calculate and return the mean intensity for deep soil sites.
Implements an equation from table 4.
"""
if mag <= self.NEAR_FIELD_SATURATION_MAG:
c4 = self.COEFFS_SOIL_IMT_INDEPENDENT['c4lowmag']
c5 = self.COEFFS_SOIL_IMT_INDEPENDENT['c5lowmag']
else:
c4 = self.COEFFS_SOIL_IMT_INDEPENDENT['c4himag']
c5 = self.COEFFS_SOIL_IMT_INDEPENDENT['c5himag']
c2 = self.COEFFS_SOIL_IMT_INDEPENDENT['c2']
c3 = self.COEFFS_SOIL_IMT_INDEPENDENT['c3']
C = self.COEFFS_SOIL[imt]
if is_reverse:
c1 = self.COEFFS_SOIL_IMT_INDEPENDENT['c1r']
c6 = C['c6r']
else:
c1 = self.COEFFS_SOIL_IMT_INDEPENDENT['c1ss']
c6 = C['c6ss']
# clip mag if greater than 8.5. This is to avoid
# ValueError: negative number cannot be raised to a fractional power
mag = 8.5 if mag > 8.5 else mag
return (c1 + c2 * mag + c6 + C['c7'] * ((8.5 - mag) ** 2.5)
- c3 * numpy.log(rrup + c4 * numpy.exp(c5 * mag)))
def to_bytes(self):
'''
Create bytes from properties
'''
# Verify that properties make sense
self.sanitize()
# Start with the type
bitstream = BitArray('uint:4=%d' % self.message_type)
# Add the flags
bitstream += BitArray('bool=%d, bool=%d, bool=%d'
% (self.probe,
self.enlra_enabled,
self.security))
# Add padding
bitstream += self._reserved1
# Add record count
bitstream += BitArray('uint:8=%d' % len(self.records))
# Add the nonce
nonce = bytes(self.nonce)
if len(nonce) < 8:
padding_len = 8 - len(nonce)
bitstream += BitArray(8 * padding_len)
bitstream += BitArray(bytes=nonce)
# Add the map-reply records
for record in self.records:
bitstream += record.to_bitstream()
# If the security flag is set then there should be security data here
# TODO: deal with security flag [LISP-Security]
if self.security:
raise NotImplementedError('Handling security data is not ' +
'implemented yet')
return bitstream.bytes
def open(self):
""" Search device on USB tree and set is as escpos device """
self.device = usb.core.find(idVendor=self.idVendor, idProduct=self.idProduct)
if self.device is None:
raise NoDeviceError()
try:
if self.device.is_kernel_driver_active(self.interface):
self.device.detach_kernel_driver(self.interface)
self.device.set_configuration()
usb.util.claim_interface(self.device, self.interface)
except usb.core.USBError as e:
raise HandleDeviceError(e)
def allowance (self, filename):
"""Preconditions:
- our agent applies to this entry
- filename is URL decoded
Check if the given filename is allowed to access this entry.
@return: True if allowed, else False
@rtype: bool
"""
for line in self.rulelines:
log.debug(LOG_CHECK, "%s %s %s", filename, str(line), line.allowance)
if line.applies_to(filename):
log.debug(LOG_CHECK, " ... rule line %s", line)
return line.allowance
log.debug(LOG_CHECK, " ... no rule lines of %s applied to %s; allowed.", self.useragents, filename)
return True
def get_plugins(modules, classobj):
"""Find all class objects in all modules.
@param modules: the modules to search
@ptype modules: iterator of modules
@return: found classes
@rtype: iterator of class objects
"""
for module in modules:
for plugin in get_module_plugins(module, classobj):
yield plugin
def on_return(self, channel, method, properties, body):
"""Invoked by RabbitMQ when it returns a message that was published.
:param channel: The channel the message was delivered on
:type channel: pika.channel.Channel
:param method: The AMQP method frame
:type method: pika.frame.Frame
:param properties: The AMQP message properties
:type properties: pika.spec.Basic.Properties
:param bytes body: The message body
"""
pending = self.pending_confirmations()
if not pending: # Exit early if there are no pending messages
self.logger.warning('RabbitMQ returned message %s and no pending '
'messages are unconfirmed',
utils.message_info(method.exchange,
method.routing_key,
properties))
return
self.logger.warning('RabbitMQ returned message %s: (%s) %s',
utils.message_info(method.exchange,
method.routing_key, properties),
method.reply_code, method.reply_text)
# Try and match the exact message or first message published that
# matches the exchange and routing key
for offset, msg in pending:
if (msg.message_id == properties.message_id or
(msg.exchange == method.exchange and
msg.routing_key == method.routing_key)):
self.published_messages[offset].future.set_result(False)
return
# Handle the case where we only can go on message ordering
self.published_messages[0].future.set_result(False)
def getNextSample(self, V):
"""
Given a ranking over the candidates, generate a new ranking by assigning each candidate at
position i a Plackett-Luce weight of phi^i and draw a new ranking.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last.
"""
W, WProb = self.drawRankingPlakettLuce(V)
VProb = self.calcProbOfVFromW(V, W)
acceptanceRatio = self.calcAcceptanceRatio(V, W)
prob = min(1.0, acceptanceRatio * (VProb/WProb))
if random.random() <= prob:
V = W
return V
def factors(self):
"""
Access the factors
:returns: twilio.rest.authy.v1.service.entity.factor.FactorList
:rtype: twilio.rest.authy.v1.service.entity.factor.FactorList
"""
if self._factors is None:
self._factors = FactorList(
self._version,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
)
return self._factors
def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
"""Adds a deprecation notice to a docstring for deprecated functions."""
if instructions:
deprecation_message = """
.. warning::
**THIS FUNCTION IS DEPRECATED:** It will be removed %s.
*Instructions for updating:* %s.
""" % (('in a future version' if date is None else ('after %s' % date)), instructions)
else:
deprecation_message = """
.. warning::
**THIS FUNCTION IS DEPRECATED:** It will be removed %s.
""" % (('in a future version' if date is None else ('after %s' % date)))
main_text = [deprecation_message]
return _add_notice_to_docstring(doc, 'DEPRECATED FUNCTION', main_text)
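A brief usage sketch (illustrative only; `_add_notice_to_docstring` is the companion helper from the same module and is not shown here):

# Hypothetical call on a made-up docstring:
new_doc = _add_deprecated_function_notice_to_docstring(
    "Compute a value.",
    date="2020-01-01",
    instructions="use compute_value_v2 instead",
)
# new_doc now contains a ".. warning::" block stating the function will be
# removed after 2020-01-01 and pointing to compute_value_v2.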
def openSafeReplace(filepath, mode='w+b'):
"""Context manager to open a temporary file and replace the original file on
closing.
"""
tempfileName = None
#Check if the filepath can be accessed and is writable before creating the
#tempfile
if not _isFileAccessible(filepath):
raise IOError('File %s is not writable' % (filepath, ))
with tempfile.NamedTemporaryFile(delete=False, mode=mode) as tmpf:
tempfileName = tmpf.name
yield tmpf
#Check if the filepath can be accessed and is writable before moving the
#tempfile
if not _isFileAccessible(filepath):
raise IOError('File %s is not writable' % (filepath, ))
#Note: here unhandled exceptions may still occur because of race conditions,
#messing things up.
shutil.move(tempfileName, filepath)
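A usage sketch (assumes the full module registers this function as a context manager, e.g. with `contextlib.contextmanager`, and that `_isFileAccessible` passes for the target path):

# Hypothetical path: atomically rewrite results.txt through a temporary file.
with openSafeReplace('results.txt', mode='w+b') as tmp:
    tmp.write(b'new contents')
# On a clean exit the temporary file has been moved over results.txt.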
def _getDefaultCombinedL4Params(self, numInputBits, inputSize,
numExternalInputBits, externalInputSize,
L2CellCount):
"""
Returns a good default set of parameters to use in a combined L4 region.
"""
sampleSize = numExternalInputBits + numInputBits
activationThreshold = int(max(numExternalInputBits, numInputBits) * .6)
minThreshold = activationThreshold
return {
"columnCount": inputSize,
"cellsPerColumn": 16,
"learn": True,
"learnOnOneCell": False,
"initialPermanence": 0.41,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.02,
"minThreshold": minThreshold,
"basalPredictedSegmentDecrement": 0.001,
"apicalPredictedSegmentDecrement": 0.0,
"reducedBasalThreshold": int(activationThreshold*0.6),
"activationThreshold": activationThreshold,
"sampleSize": sampleSize,
"implementation": "ApicalTiebreak",
"seed": self.seed,
"basalInputWidth": inputSize*16 + externalInputSize,
"apicalInputWidth": L2CellCount,
}
def _handle_authentication_error(self):
"""
Return an authentication error.
"""
response = make_response('Access Denied')
response.headers['WWW-Authenticate'] = self.auth.get_authenticate_header()
response.status_code = 401
return response
def page_view(url):
"""
Page view decorator.
Put that around a state handler function in order to log a page view each
time the handler gets called.
:param url: simili-URL that you want to give to the state
"""
def decorator(func):
@wraps(func)
async def wrapper(self: BaseState, *args, **kwargs):
user_id = self.request.user.id
try:
user_lang = await self.request.user.get_locale()
except NotImplementedError:
user_lang = ''
title = self.__class__.__name__
# noinspection PyTypeChecker
async for p in providers():
await p.page_view(url, title, user_id, user_lang)
return await func(self, *args, **kwargs)
return wrapper
return decorator
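A minimal sketch of applying the decorator (the state class, handler name, and URL are hypothetical; `BaseState` and `providers` come from the surrounding framework):

class Welcome(BaseState):
    @page_view('/welcome')
    async def handle(self):
        # regular state handling; the page view is reported to all providers first
        ...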
def male_breeding_location_type(self):
"""This attribute defines whether a breeding male's current location is the same as the breeding cage.
This attribute is used to color breeding table entries such that male mice which are currently in a different cage can quickly be identified."""
if int(self.Male.all()[0].Cage) == int(self.Cage):
type = "resident breeder"
else:
type = "non-resident breeder"
return type
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing ends
"""
self.sync_week_metric(unique_identifier, metric, start_date, end_date)
self.sync_month_metric(unique_identifier, metric, start_date, end_date)
def _data_update(subjects, queue, run_flag):
"""
Get data from the background process and notify all subscribed observers with the new data
"""
while run_flag.running:
while not queue.empty():
data = queue.get()
for subject in [s for s in subjects if not s.is_disposed]:
subject.on_next(data)
time.sleep(0.1)
def iter_work_specs(self, limit=None, start=None):
'''
yield work spec dicts
'''
count = 0
ws_list, start = self.list_work_specs(limit, start)
while True:
for name_spec in ws_list:
yield name_spec[1]
count += 1
if (limit is not None) and (count >= limit):
break
if not start:
break
if limit is not None:
limit -= count
ws_list, start = self.list_work_specs(limit, start)
def clear_context(pid_file):
"""Called at exit. Delete the context file to signal there is no active notebook.
We don't delete the whole file, but leave it around for debugging purposes. Maybe later we want to pass some information back to the web site.
"""
# NOTE: the early return below disables this function; the code after it is unreachable.
return
raise RuntimeError("Should not happen")
fname = get_context_file_name(pid_file)
shutil.move(fname, fname.replace("context.json", "context.old.json"))
data = {}
data["terminated"] = str(datetime.datetime.now(datetime.timezone.utc))
set_context(pid_file, data)
def replace_cluster_custom_object(self, group, version, plural, name, body, **kwargs): # noqa: E501
"""replace_cluster_custom_object # noqa: E501
replace the specified cluster scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_cluster_custom_object(group, version, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str plural: the custom object's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: The JSON schema of the Resource to replace. (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_cluster_custom_object_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
else:
(data) = self.replace_cluster_custom_object_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
return data
def get_case(word, correction):
"""
Best guess of intended case.
manchester => manchester
chilton => Chilton
AAvTech => AAvTech
THe => The
imho => IMHO
"""
if word.istitle():
return correction.title()
if word.isupper():
return correction.upper()
if correction == word and not word.islower():
return word
if len(word) > 2 and word[:2].isupper():
return correction.title()
if not known_as_lower([correction]): #expensive
try:
return CASE_MAPPED[correction]
except KeyError:
pass
return correction
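Two quick checks of the casing rules above; these inputs resolve before the `known_as_lower`/`CASE_MAPPED` lookups, so only the function itself is needed:

assert get_case("THe", "the") == "The"                        # two leading capitals -> title case
assert get_case("MANCHESTER", "manchester") == "MANCHESTER"   # all-caps word keeps upper case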
def get(cls, name=__name__):
"""Return a Mapper instance with the given name.
If the name already exists, return its instance.
Does not work if a Mapper was created via its constructor.
Using `Mapper.get()`_ is the preferred way.
Args:
name (str, optional): Name for the newly created instance.
Defaults to `__name__`.
Returns:
Mapper: A mapper instance for the given name.
Raises:
TypeError: If an invalid name was given.
"""
if not isinstance(name, str):
raise TypeError('A mapper name must be a string')
if name not in cls.__instances:
cls.__instances[name] = cls()
cls.__instances[name]._name = name
return cls.__instances[name]
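A short usage sketch of the per-name instance cache (the name 'routes' is made up):

m1 = Mapper.get('routes')
m2 = Mapper.get('routes')
assert m1 is m2                  # same name -> same cached Mapper instance
try:
    Mapper.get(42)
except TypeError:
    pass                         # non-string names are rejected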
def _set_log_level(self):
"""
Inspects config and sets the log level as instance attr. If not present
in config, default is "INFO".
"""
# set log level on logger
log_level = "INFO"
if hasattr(self._config, "level") and self._config.level.upper() in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]:
log_level = self._config.level.upper()
self._log_level = log_level
async def _dump_container_val(self, writer, elem, container_type, params=None):
"""
Single elem dump
:param writer:
:param elem:
:param container_type:
:param params:
:return:
"""
elem_type = container_elem_type(container_type, params)
await self.dump_field(writer, elem, elem_type, params[1:] if params else None)
def max_date(self, symbol):
"""
Return the maximum datetime stored for a particular symbol
Parameters
----------
symbol : `str`
symbol name for the item
"""
res = self._collection.find_one({SYMBOL: symbol}, projection={ID: 0, END: 1},
sort=[(START, pymongo.DESCENDING)])
if res is None:
raise NoDataFoundException("No Data found for {}".format(symbol))
return utc_dt_to_local_dt(res[END])
def select_action(self, state):
"""
Select the best action for the given state using e-greedy exploration to
minimize overfitting
:return: tuple(action, value)
"""
value = 0
if self.steps < self.min_steps:
action = np.random.randint(self.actions)
else:
self.eps = max(self.eps_end, self.eps * self.eps_decay)
if random.random() < self.eps:
action = np.random.randint(self.actions)
else:
self.local.eval()
with torch.no_grad():
state = torch.tensor(state, device=self.device, dtype=torch.float).unsqueeze(0)
Q = self.local(state)
value, action = torch.max(Q, 1)
return int(action), float(value)
def render_er(input, output, mode='auto', include_tables=None, include_columns=None,
exclude_tables=None, exclude_columns=None, schema=None):
"""
Transform the metadata into a representation.
:param input: Possible inputs are instances of:
MetaData: SQLAlchemy Metadata
DeclarativeMeta: SQLAlchemy declarative Base
:param output: name of the file to write the output to
:param mode: str in list:
'er': writes to a file the markup to generate an ER style diagram.
'graph': writes the image of the ER diagram.
'dot': write to file the diagram in dot format.
'auto': choose from the filename:
'*.er': writes to a file the markup to generate an ER style diagram.
'.dot': returns the graph in the dot syntax.
else: return a graph to the format graph
:param include_tables: lst of str, table names to include, None means include all
:param include_columns: lst of str, column names to include, None means include all
:param exclude_tables: lst of str, table names to exclude, None means exclude nothing
:param exclude_columns: lst of str, field names to exclude, None means exclude nothing
:param schema: name of the schema
"""
try:
tables, relationships = all_to_intermediary(input, schema=schema)
tables, relationships = filter_resources(tables, relationships,
include_tables=include_tables, include_columns=include_columns,
exclude_tables=exclude_tables, exclude_columns=exclude_columns)
intermediary_to_output = get_output_mode(output, mode)
intermediary_to_output(tables, relationships, output)
except ImportError as e:
module_name = e.message.split()[-1]
print('Please install {0} using "pip install {0}".'.format(module_name))
except ParsingException as e:
sys.stderr.write(e.message)
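A usage sketch (hypothetical names; `Base` stands for any SQLAlchemy declarative base or MetaData instance already defined elsewhere):

render_er(Base, 'schema.png')                                        # full ER diagram as an image
render_er(Base, 'accounts.er', include_tables=['user', 'account'])   # filtered ER markup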
def pool_function(p, nick, rutaDescarga, avoidProcessing=True, avoidDownload=True, verbosity=1):
"""
Wrapper for being able to launch all the threads of getPageWrapper.
We receive the parameters for getPageWrapper as a tuple.
Args:
-----
p: Platform where the information is stored. It is a string.
nick: Nick to be searched.
rutaDescarga: Local file where saving the obtained information.
avoidProcessing: Boolean var that defines whether the profiles will NOT
be processed (stored in this version).
avoidDownload: Boolean var that defines whether the profiles will NOT be
downloaded (stored in this version).
verbosity: The verbosity level: 1, shows errors; 2, shows warnings.
Return:
-------
A dictionary with the following structure:
{
"platform": "Platform",
"status": "DONE",
"data": "<data>"
}
Data is None or a serialized representation of the dictionary.
"""
try:
#res = getPageWrapper(p, nick, rutaDescarga, avoidProcessing, avoidDownload, outQueue)
res = p.getInfo(
query=nick,
mode="usufy",
process=True
)
return {"platform" : str(p), "status": "Ok", "data": res}
except Exception as e:
if (isinstance(e, OSRFrameworkError) and verbosity >= 1) or (isinstance(e, OSRFrameworkException) and verbosity >= 2):
print(str(e))
return {"platform" : str(p), "status": e, "data": e.generic} | Wrapper for being able to launch all the threads of getPageWrapper.
We receive the parameters for getPageWrapper as a tuple.
Args:
-----
pName: Platform where the information is stored. It is a string.
nick: Nick to be searched.
rutaDescarga: Local file where saving the obtained information.
avoidProcessing: Boolean var that defines whether the profiles will NOT
be processed (stored in this version).
avoidDownload: Boolean var that defines whether the profiles will NOT be
downloaded (stored in this version).
verbosity: The verbosity level: 1, shows errors; 2, shows warnings.
Return:
-------
A dictionary with the following structure:
{
"platform": "Platform",
"status": "DONE",
"data": "<data>"
}
Data is None or a serialized representation of the dictionary. |
def create(self, friendly_name=values.unset, domain_name=values.unset,
disaster_recovery_url=values.unset,
disaster_recovery_method=values.unset, recording=values.unset,
secure=values.unset, cnam_lookup_enabled=values.unset):
"""
Create a new TrunkInstance
:param unicode friendly_name: A string to describe the resource
:param unicode domain_name: The unique address you reserve on Twilio to which you route your SIP traffic
:param unicode disaster_recovery_url: The HTTP URL that we should call if an error occurs while sending SIP traffic towards your configured Origination URL
:param unicode disaster_recovery_method: The HTTP method we should use to call the disaster_recovery_url
:param TrunkInstance.RecordingSetting recording: The recording settings for the trunk
:param bool secure: Whether Secure Trunking is enabled for the trunk
:param bool cnam_lookup_enabled: Whether Caller ID Name (CNAM) lookup should be enabled for the trunk
:returns: Newly created TrunkInstance
:rtype: twilio.rest.trunking.v1.trunk.TrunkInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'DomainName': domain_name,
'DisasterRecoveryUrl': disaster_recovery_url,
'DisasterRecoveryMethod': disaster_recovery_method,
'Recording': recording,
'Secure': secure,
'CnamLookupEnabled': cnam_lookup_enabled,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return TrunkInstance(self._version, payload, )
def pci_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug: PCI._debug("pci_contents use_dict=%r as_class=%r", use_dict, as_class)
# make/extend the dictionary of content
if use_dict is None:
use_dict = as_class()
# save the values
for k, v in (('user_data', self.pduUserData), ('source', self.pduSource), ('destination', self.pduDestination)):
if _debug: PCI._debug(" - %r: %r", k, v)
if v is None:
continue
if hasattr(v, 'dict_contents'):
v = v.dict_contents(as_class=as_class)
use_dict.__setitem__(k, v)
# return what we built/updated
return use_dict
def coredump_configured(name, enabled, dump_ip, host_vnic='vmk0', dump_port=6500):
'''
Ensures a host's core dump configuration.
name
Name of the state.
enabled
Sets whether or not ESXi core dump collection should be enabled.
This is a boolean value set to ``True`` or ``False`` to enable
or disable core dumps.
Note that ESXi requires that the core dump must be enabled before
any other parameters may be set. This also affects the ``changes``
results in the state return dictionary. If ``enabled`` is ``False``,
we can't obtain any previous settings to compare other state variables,
resulting in many ``old`` references returning ``None``.
Once ``enabled`` is ``True`` the ``changes`` dictionary comparisons
will be more accurate. This is due to the way the system coredump
network configuration command returns data.
dump_ip
The IP address of host that will accept the dump.
host_vnic
Host VNic port through which to communicate. Defaults to ``vmk0``.
dump_port
TCP port to use for the dump. Defaults to ``6500``.
Example:
.. code-block:: yaml
configure-host-coredump:
esxi.coredump_configured:
- enabled: True
- dump_ip: 'my-coredump-ip.example.com'
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
esxi_cmd = 'esxi.cmd'
enabled_msg = 'ESXi requires that the core dump must be enabled ' \
'before any other parameters may be set.'
host = __pillar__['proxy']['host']
current_config = __salt__[esxi_cmd]('get_coredump_network_config').get(host)
error = current_config.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
current_config = current_config.get('Coredump Config')
current_enabled = current_config.get('enabled')
# Configure coredump enabled state, if there are changes.
if current_enabled != enabled:
enabled_changes = {'enabled': {'old': current_enabled, 'new': enabled}}
# Only run the command if not using test=True
if not __opts__['test']:
response = __salt__[esxi_cmd]('coredump_network_enable',
enabled=enabled).get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
# Allow users to disable core dump, but then return since
# nothing else can be set if core dump is disabled.
if not enabled:
ret['result'] = True
ret['comment'] = enabled_msg
ret['changes'].update(enabled_changes)
return ret
ret['changes'].update(enabled_changes)
elif not enabled:
# If current_enabled and enabled match, but are both False,
# We must return before configuring anything. This isn't a
# failure as core dump may be disabled intentionally.
ret['result'] = True
ret['comment'] = enabled_msg
return ret
# Test for changes with all remaining configurations. The changes flag is used
# To detect changes, and then set_coredump_network_config is called one time.
changes = False
current_ip = current_config.get('ip')
if current_ip != dump_ip:
ret['changes'].update({'dump_ip':
{'old': current_ip,
'new': dump_ip}})
changes = True
current_vnic = current_config.get('host_vnic')
if current_vnic != host_vnic:
ret['changes'].update({'host_vnic':
{'old': current_vnic,
'new': host_vnic}})
changes = True
current_port = current_config.get('port')
if current_port != six.text_type(dump_port):
ret['changes'].update({'dump_port':
{'old': current_port,
'new': six.text_type(dump_port)}})
changes = True
# Only run the command if not using test=True and changes were detected.
if not __opts__['test'] and changes is True:
response = __salt__[esxi_cmd]('set_coredump_network_config',
dump_ip=dump_ip,
host_vnic=host_vnic,
dump_port=dump_port).get(host)
if response.get('success') is False:
msg = response.get('stderr')
if not msg:
msg = response.get('stdout')
ret['comment'] = 'Error: {0}'.format(msg)
return ret
ret['result'] = True
if ret['changes'] == {}:
ret['comment'] = 'Core Dump configuration is already in the desired state.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Core dump configuration will change.'
return ret
def solve2x2(lhs, rhs):
"""Solve a square 2 x 2 system via LU factorization.
This is meant to be a stand-in for LAPACK's ``dgesv``, which just wraps
two calls to ``dgetrf`` and ``dgetrs``. We wrap for two reasons:
* We seek to avoid exceptions as part of the control flow (which is
what :func:`numpy.linalg.solve` does).
* We seek to avoid excessive type- and size-checking, since this
special case is already known.
Args:
lhs (numpy.ndarray): A ``2 x 2`` array of real numbers.
rhs (numpy.ndarray): A 1D array of 2 real numbers.
Returns:
Tuple[bool, float, float]: A triple of
* A flag indicating if ``lhs`` is a singular matrix.
* The first component of the solution.
* The second component of the solution.
"""
# A <--> lhs[0, 0]
# B <--> lhs[0, 1]
# C <--> lhs[1, 0]
# D <--> lhs[1, 1]
# E <--> rhs[0]
# F <--> rhs[1]
if np.abs(lhs[1, 0]) > np.abs(lhs[0, 0]):
# NOTE: We know there is no division by zero here since ``C``
# is **strictly** bigger than **some** value (in magnitude).
# [A | B][x] = [E]
# [C | D][y] [F]
ratio = lhs[0, 0] / lhs[1, 0]
# r = A / C
# [A - rC | B - rD][x] [E - rF]
# [C | D ][y] = [F ]
# ==> 0x + (B - rD) y = E - rF
denominator = lhs[0, 1] - ratio * lhs[1, 1]
if denominator == 0.0:
return True, None, None
y_val = (rhs[0] - ratio * rhs[1]) / denominator
# Cx + Dy = F ==> x = (F - Dy) / C
x_val = (rhs[1] - lhs[1, 1] * y_val) / lhs[1, 0]
return False, x_val, y_val
else:
if lhs[0, 0] == 0.0:
return True, None, None
# [A | B][x] = [E]
# [C | D][y] [F]
ratio = lhs[1, 0] / lhs[0, 0]
# r = C / A
# [A | B ][x] = [E ]
# [C - rA | D - rB][y] [F - rE]
# ==> 0x + (D - rB) y = F - rE
denominator = lhs[1, 1] - ratio * lhs[0, 1]
if denominator == 0.0:
return True, None, None
y_val = (rhs[1] - ratio * rhs[0]) / denominator
# Ax + By = E ==> x = (E - B y) / A
x_val = (rhs[0] - lhs[0, 1] * y_val) / lhs[0, 0]
return False, x_val, y_val
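A quick numeric check (illustrative, not from the original test suite): solve a small well-conditioned system and compare against numpy.

import numpy as np

lhs = np.array([[2.0, 1.0],
                [1.0, 3.0]])
rhs = np.array([5.0, 10.0])
singular, x_val, y_val = solve2x2(lhs, rhs)
assert not singular
assert np.allclose([x_val, y_val], np.linalg.solve(lhs, rhs))  # x = 1.0, y = 3.0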
def configure_savedsearch(self, ns, definition):
"""
Register a saved search endpoint.
The definition's func should be a search function, which must:
- accept kwargs for the request data
- return a tuple of (items, count) where count is the total number of items
available (in the case of pagination)
The definition's request_schema will be used to process query string arguments.
:param ns: the namespace
:param definition: the endpoint definition
"""
paginated_list_schema = self.page_cls.make_paginated_list_schema_class(
ns,
definition.response_schema,
)()
@self.add_route(ns.collection_path, Operation.SavedSearch, ns)
@request(definition.request_schema)
@response(paginated_list_schema)
@wraps(definition.func)
def saved_search(**path_data):
request_data = load_request_data(definition.request_schema)
page = self.page_cls.from_dict(request_data)
request_data.update(page.to_dict(func=identity))
result = definition.func(**merge_data(path_data, request_data))
response_data, headers = page.to_paginated_list(result, ns, Operation.SavedSearch)
definition.header_func(headers, response_data)
response_format = self.negotiate_response_content(definition.response_formats)
return dump_response_data(
paginated_list_schema,
response_data,
headers=headers,
response_format=response_format,
)
saved_search.__doc__ = "Persist and return the search results of {}".format(pluralize(ns.subject_name))
def _setVirtualEnv():
"""Attempt to set the virtualenv activate command, if it hasn't been specified.
"""
try:
activate = options.virtualenv.activate_cmd
except AttributeError:
activate = None
if activate is None:
virtualenv = path(os.environ.get('VIRTUAL_ENV', ''))
if not virtualenv:
virtualenv = options.paved.cwd
else:
virtualenv = path(virtualenv)
activate = virtualenv / 'bin' / 'activate'
if activate.exists():
info('Using default virtualenv at %s' % activate)
options.setdotted('virtualenv.activate_cmd', 'source %s' % activate)
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy, insofar as grouped values are close in their iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def toc_html(self):
"""Return the HTML for the current TOC.
This expects the `_toc` attribute to have been set on this instance.
"""
if self._toc is None:
return None
def indent():
return ' ' * (len(h_stack) - 1)
lines = []
h_stack = [0] # stack of header-level numbers
for level, id, name in self._toc:
if level > h_stack[-1]:
lines.append("%s<ul>" % indent())
h_stack.append(level)
elif level == h_stack[-1]:
lines[-1] += "</li>"
else:
while level < h_stack[-1]:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul></li>" % indent())
lines.append('%s<li><a href="#%s">%s</a>' % (
indent(), id, name))
while len(h_stack) > 1:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul>" % indent())
return '\n'.join(lines) + '\n'
def sendcontrol(self, char):
'''Helper method that wraps send() with mnemonic access for sending control
character to the child (such as Ctrl-C or Ctrl-D). For example, to send
Ctrl-G (ASCII 7, bell, '\a')::
child.sendcontrol('g')
See also, sendintr() and sendeof().
'''
n, byte = self.ptyproc.sendcontrol(char)
self._log_control(byte)
return n
def from_str(string, max_number=9, separator="."):
"""Parses string
:param string: Version
:param max_number: Max number reachable by sub
:param separator: Version numbers are separated by this string
:return: Subsystem object parsed from the string
"""
tokens = string.split(separator)
tokens = list(reversed(tokens)) # reverse order of importance
most_important = tokens[-1] # cannot be parsed like the others
levels = [
Level(max_number, int(token)) for token in tokens[:-1]
]
levels.append(
Level(float("inf"), int(most_important))
)
return Subsystem(levels, separator)
def _image_size(image_config, type_, target_size):
"""Find the closest available size for specified image type.
Arguments:
image_config (:py:class:`dict`): The image config data.
type_ (:py:class:`str`): The type of image to create a URL
for, (``'poster'`` or ``'profile'``).
target_size (:py:class:`int`): The size of image to aim for (used
as either width or height).
"""
return min(
image_config['{}_sizes'.format(type_)],
key=lambda size: (abs(target_size - int(size[1:]))
if size.startswith('w') or size.startswith('h')
else 999),
)
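An illustrative call (the config dict mimics the TMDb-style structure the function expects; the sizes are made up):

image_config = {'poster_sizes': ['w92', 'w185', 'w500', 'w780', 'original']}
assert _image_size(image_config, 'poster', 450) == 'w500'      # closest width to 450px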
def linop_scale(w, op):
"""Creates weighted `LinOp` from existing `LinOp`."""
# We assume w > 0. (This assumption only relates to the is_* attributes.)
with tf.name_scope("linop_scale"):
# TODO(b/35301104): LinearOperatorComposition doesn't combine operators, so
# special case combinations here. Once it does, this function can be
# replaced by:
# return linop_composition_lib.LinearOperatorComposition([
# scaled_identity(w), op])
def scaled_identity(w):
return tf.linalg.LinearOperatorScaledIdentity(
num_rows=op.range_dimension_tensor(),
multiplier=w,
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, tf.linalg.LinearOperatorIdentity):
return scaled_identity(w)
if isinstance(op, tf.linalg.LinearOperatorScaledIdentity):
return scaled_identity(w * op.multiplier)
if isinstance(op, tf.linalg.LinearOperatorDiag):
return tf.linalg.LinearOperatorDiag(
diag=w[..., tf.newaxis] * op.diag_part(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, tf.linalg.LinearOperatorLowerTriangular):
return tf.linalg.LinearOperatorLowerTriangular(
tril=w[..., tf.newaxis, tf.newaxis] * op.to_dense(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
raise NotImplementedError(
"Unsupported Linop type ({})".format(type(op).__name__)) | Creates weighted `LinOp` from existing `LinOp`. |
def cli(ctx, verbose, fake, install, uninstall, config):
"""legit command line interface"""
# Create a repo object and remember it as as the context object. From
# this point onwards other commands can refer to it by using the
# @pass_scm decorator.
ctx.obj = SCMRepo()
ctx.obj.fake = fake
ctx.obj.verbose = fake or verbose
if install:
do_install(ctx, verbose, fake)
ctx.exit()
elif uninstall:
do_uninstall(ctx, verbose, fake)
ctx.exit()
elif config:
do_edit_settings(fake)
ctx.exit()
else:
if ctx.invoked_subcommand is None:
# Display help to user if no commands were passed.
click.echo(format_help(ctx.get_help()))
def create_new(self, body):
"""Configure a new custom domain
Args:
body (str): The domain, type and verification method in json
See: https://auth0.com/docs/api/management/v2#!/Custom_Domains/post_custom_domains
"""
return self.client.post(self._url(), data=body)
def create_code_cell(block):
"""Create a notebook code cell from a block."""
code_cell = nbbase.new_code_cell(source=block['content'])
attr = block['attributes']
if not attr.is_empty:
code_cell.metadata \
= nbbase.NotebookNode({'attributes': attr.to_dict()})
execution_count = attr.kvs.get('n')
if not execution_count:
code_cell.execution_count = None
else:
code_cell.execution_count = int(execution_count)
return code_cell
def set_cmd_env_var(value):
"""Decorator that sets the temple command env var to value"""
def func_decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
previous_cmd_env_var = os.getenv(temple.constants.TEMPLE_ENV_VAR)
os.environ[temple.constants.TEMPLE_ENV_VAR] = value
try:
ret_val = function(*args, **kwargs)
finally:
if previous_cmd_env_var is None:
del os.environ[temple.constants.TEMPLE_ENV_VAR]
else:
os.environ[temple.constants.TEMPLE_ENV_VAR] = previous_cmd_env_var
return ret_val
return wrapper
return func_decorator
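A usage sketch (the decorated function and the value 'update' are hypothetical; `temple.constants.TEMPLE_ENV_VAR` is the constant referenced above):

@set_cmd_env_var('update')
def update_project():
    # while this runs, os.environ[temple.constants.TEMPLE_ENV_VAR] == 'update';
    # the previous value (or its absence) is restored afterwards
    ...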
def getRoles(self):
"""Get all :class:`rtcclient.models.Role` objects in this project
area
If no :class:`Roles` are retrieved, `None` is returned.
:return: a :class:`list` that contains all
:class:`rtcclient.models.Role` objects
:rtype: list
"""
# no need to retrieve all the entries from _get_paged_resources
# role raw data is very simple that contains no other links
self.log.info("Get all the roles in <ProjectArea %s>",
self)
roles_url = "/".join([self.rtc_obj.url,
"process/project-areas/%s/roles" % self.id])
resp = self.get(roles_url,
verify=False,
proxies=self.rtc_obj.proxies,
headers=self.rtc_obj.headers)
roles_list = list()
raw_data = xmltodict.parse(resp.content)
roles_raw = raw_data['jp06:roles']['jp06:role']
if not roles_raw:
self.log.warning("There are no roles in <ProjectArea %s>",
self)
return None
for role_raw in roles_raw:
role = Role(role_raw.get("jp06:url"),
self.rtc_obj,
raw_data=role_raw)
roles_list.append(role)
return roles_list
def namedb_state_transition( cur, opcode, op_data, block_id, vtxindex, txid, history_id, cur_record, record_table, constraints_ignored=[] ):
"""
Given an operation (opcode, op_data), a point in time (block_id, vtxindex, txid), and a current
record (history_id, cur_record), apply the operation to the record and save the delta to the record's
history. Also, insert or update the new record into the db.
The cur_record must exist already.
Return the newly updated record on success, with all compatibility quirks preserved.
Raise an exception on failure.
DO NOT CALL THIS METHOD DIRECTLY.
"""
# sanity check: must be a state-transitioning operation
try:
assert opcode in OPCODE_NAME_STATE_TRANSITIONS + OPCODE_NAMESPACE_STATE_TRANSITIONS, "BUG: opcode '%s' is not a state-transition"
assert 'opcode' not in op_data, 'BUG: opcode not allowed in op_data'
except Exception, e:
log.exception(e)
log.error("BUG: opcode '%s' is not a state-transition operation" % opcode)
os.abort()
# make sure we have a name/namespace_id and block number
op_data_name = copy.deepcopy(op_data)
if opcode in OPCODE_NAME_STATE_TRANSITIONS:
# name state transition
op_data_name['name'] = history_id
elif opcode in OPCODE_NAMESPACE_STATE_TRANSITIONS:
# namespace state transition
op_data_name['namespace_id'] = history_id
# sanity check make sure we got valid state transition data
try:
assert cur_record.has_key('block_number'), 'current record does not have a block number'
op_data_name['block_number'] = cur_record['block_number']
rc = namedb_state_transition_sanity_check( opcode, op_data_name, history_id, cur_record, record_table )
if not rc:
raise Exception("State transition sanity checks failed")
rc = namedb_state_mutation_sanity_check( opcode, op_data_name )
if not rc:
raise Exception("State mutation sanity checks failed")
except Exception, e:
log.exception(e)
log.error("FATAL: state transition sanity checks failed")
os.abort()
# 1. generate the new record that will be used for consensus.
# It will be the new data overlayed on the current record, with all quirks applied.
new_record = {}
new_record.update(cur_record)
new_record.update(op_data_name)
new_record['opcode'] = opcode
canonicalized_record = op_canonicalize_quirks(opcode, new_record, cur_record)
canonicalized_record['opcode'] = opcode
rc = namedb_history_save(cur, opcode, history_id, None, new_record.get('value_hash', None), block_id, vtxindex, txid, canonicalized_record)
if not rc:
log.error("FATAL: failed to save history for '%s' at (%s, %s)" % (history_id, block_id, vtxindex))
os.abort()
rc = False
merged_new_record = None
# 2. Store the actual op_data, to be returned on name lookups
# Don't store extra fields that don't belong in the db (i.e. that we don't have colunms for), but preserve them across the write.
stored_op_data = {}
stored_op_data.update(op_data_name)
# separate out the extras
_, op_data_extra = namedb_find_missing_and_extra(cur, stored_op_data, record_table)
if len(op_data_extra) > 0:
log.debug("Remove extra fields: {}".format(','.join(op_data_extra)))
for extra in op_data_extra:
del stored_op_data[extra]
if opcode in OPCODE_NAME_STATE_TRANSITIONS:
# name state transition
rc = namedb_name_update( cur, opcode, stored_op_data, constraints_ignored=constraints_ignored )
if not rc:
log.error("FATAL: opcode is not a state-transition operation on names")
os.abort()
merged_new_record = namedb_get_name(cur, history_id, block_id, include_history=False, include_expired=True)
elif opcode in OPCODE_NAMESPACE_STATE_TRANSITIONS:
# namespace state transition
rc = namedb_namespace_update( cur, opcode, stored_op_data, constraints_ignored=constraints_ignored )
if not rc:
log.error("FATAL: opcode is not a state-transition operation on namespaces")
os.abort()
merged_new_record = namedb_get_namespace(cur, history_id, block_id, include_history=False, include_expired=True)
# 3. success! make sure the merged_new_record is consistent with canonicalized_record
for f in merged_new_record:
if f not in canonicalized_record:
raise Exception("canonicalized record is missing {}".format(f))
return canonicalized_record
def delete_entity(self, partition_key, row_key,
if_match='*'):
'''
Adds a delete entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.delete_entity` for more
information on deletes.
The operation will not be executed until the batch is committed.
:param str partition_key:
The PartitionKey of the entity.
:param str row_key:
The RowKey of the entity.
:param str if_match:
The client may specify the ETag for the entity on the
request in order to compare to the ETag maintained by the service
for the purpose of optimistic concurrency. The delete operation
will be performed only if the ETag sent by the client matches the
value maintained by the server, indicating that the entity has
not been modified since it was retrieved by the client. To force
an unconditional delete, set If-Match to the wildcard character (*).
'''
request = _delete_entity(partition_key, row_key, if_match)
self._add_to_batch(partition_key, row_key, request)
def render_to_json_response(self, context, **kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
return HttpResponse(
self.convert_context_to_json(context),
content_type='application/json',
**kwargs
)
def value(self):
"""Returns the model's average precision for each class
Return:
ap (FloatTensor): 1xK tensor, with avg precision for each class k
"""
if self.scores.numel() == 0:
return 0
ap = torch.zeros(self.scores.size(1))
rg = torch.arange(1, self.scores.size(0)).float()
# compute average precision for each class
for k in range(self.scores.size(1)):
# sort scores
scores = self.scores[:, k]
targets = self.targets[:, k]
# compute average precision
ap[k] = AveragePrecisionMeter.average_precision(scores, targets, self.difficult_examples)
return ap
def multi_index_df_to_component_dfs(multi_index_df, rid="rid", cid="cid"):
""" Convert a multi-index df into 3 component dfs. """
# Id level of the multiindex will become the index
rids = list(multi_index_df.index.get_level_values(rid))
cids = list(multi_index_df.columns.get_level_values(cid))
# It's possible that the index and/or columns of multi_index_df are not
# actually multi-index; need to check for this and whether there is more than one level in the index (python3)
if isinstance(multi_index_df.index, pd.MultiIndex):
# check if there is more than one level in the index (python3)
if len(multi_index_df.index.names) > 1:
# If so, drop rid because it won't go into the body of the metadata
mi_df_index = multi_index_df.index.droplevel(rid)
# Names of the multiindex levels become the headers
rhds = list(mi_df_index.names)
# Assemble metadata values
row_metadata = np.array([mi_df_index.get_level_values(level).values for level in list(rhds)]).T
# if there is one level in index (python3), then rhds and row metadata should be empty
else:
rhds = []
row_metadata = []
# If the index is not multi-index, then rhds and row metadata should be empty
else:
rhds = []
row_metadata = []
# Check if columns of multi_index_df are in fact multi-index
if isinstance(multi_index_df.columns, pd.MultiIndex):
# Check if there is more than one level in columns (python3)
if len(multi_index_df.columns.names) > 1:
# If so, drop cid because it won't go into the body of the metadata
mi_df_columns = multi_index_df.columns.droplevel(cid)
# Names of the multiindex levels become the headers
chds = list(mi_df_columns.names)
# Assemble metadata values
col_metadata = np.array([mi_df_columns.get_level_values(level).values for level in list(chds)]).T
# If there is one level in columns (python3), then chds and col metadata should be empty
else:
chds = []
col_metadata = []
# If the columns are not multi-index, then chds and col metadata should be empty
else:
chds = []
col_metadata = []
# Create component dfs
row_metadata_df = pd.DataFrame.from_records(row_metadata, index=pd.Index(rids, name="rid"), columns=pd.Index(rhds, name="rhd"))
col_metadata_df = pd.DataFrame.from_records(col_metadata, index=pd.Index(cids, name="cid"), columns=pd.Index(chds, name="chd"))
data_df = pd.DataFrame(multi_index_df.values, index=pd.Index(rids, name="rid"), columns=pd.Index(cids, name="cid"))
return data_df, row_metadata_df, col_metadata_df | Convert a multi-index df into 3 component dfs. |
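A minimal usage sketch for the splitter above; the level names ('pr_gene_symbol', 'det_plate') and the values are invented for illustration, and pandas/numpy plus the function itself are assumed to be importable.

import numpy as np
import pandas as pd

row_index = pd.MultiIndex.from_tuples(
    [('r1', 'geneA'), ('r2', 'geneB')], names=['rid', 'pr_gene_symbol'])
col_index = pd.MultiIndex.from_tuples(
    [('c1', 'plate1'), ('c2', 'plate2')], names=['cid', 'det_plate'])
mi_df = pd.DataFrame(np.arange(4.0).reshape(2, 2), index=row_index, columns=col_index)

data_df, row_meta_df, col_meta_df = multi_index_df_to_component_dfs(mi_df)
print(data_df.shape)                  # (2, 2)
print(row_meta_df.columns.tolist())   # ['pr_gene_symbol']
print(col_meta_df.columns.tolist())   # ['det_plate']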
def crossvalidate(self, foldsfile):
"""Train & Test using cross validation; foldsfile is a file that contains the filenames of all the folds."""
options = "-F " + self.format + " " + self.timbloptions + " -t cross_validate"
print("Instantiating Timbl API : " + options,file=stderr)
if sys.version < '3':
self.api = timblapi.TimblAPI(b(options), b"")
else:
self.api = timblapi.TimblAPI(options, "")
if self.debug:
print("Enabling debug for timblapi",file=stderr)
self.api.enableDebug()
print("Calling Timbl Test : " + options,file=stderr)
if sys.version < '3':
self.api.test(b(foldsfile),b'',b'')
else:
self.api.test(u(foldsfile),'','')
a = self.api.getAccuracy()
del self.api
return a | Train & Test using cross validation; foldsfile is a file that contains the filenames of all the folds. |
def to_env_vars(self):
"""Environment variable representation of the training environment
Returns:
dict: an instance of dictionary
"""
env = {
'hosts': self.hosts, 'network_interface_name': self.network_interface_name,
'hps': self.hyperparameters, 'user_entry_point': self.user_entry_point,
'framework_params': self.additional_framework_parameters,
'resource_config': self.resource_config, 'input_data_config': self.input_data_config,
'output_data_dir': self.output_data_dir,
'channels': sorted(self.channel_input_dirs.keys()),
'current_host': self.current_host, 'module_name': self.module_name,
'log_level': self.log_level,
'framework_module': self.framework_module, 'input_dir': self.input_dir,
'input_config_dir': self.input_config_dir, 'output_dir': self.output_dir,
'num_cpus': self.num_cpus,
'num_gpus': self.num_gpus, 'model_dir': self.model_dir, 'module_dir': self.module_dir,
'training_env': dict(self), 'user_args': self.to_cmd_args(),
'output_intermediate_dir': self.output_intermediate_dir
}
for name, path in self.channel_input_dirs.items():
env['channel_%s' % name] = path
for key, value in self.hyperparameters.items():
env['hp_%s' % key] = value
return _mapping.to_env_vars(env) | Environment variable representation of the training environment
Returns:
dict: an instance of dictionary |
def upgrade():
"""Upgrade database."""
# table ObjectVersion: modify primary_key
if op.get_context().dialect.name == 'mysql':
Fk = 'fk_files_object_bucket_id_files_bucket'
op.execute(
'ALTER TABLE files_object '
'DROP FOREIGN KEY {0}, DROP PRIMARY KEY, '
'ADD PRIMARY KEY(version_id), '
'ADD FOREIGN KEY(bucket_id) '
'REFERENCES files_bucket(id)'.format(Fk))
else:
op.drop_constraint('pk_files_object', 'files_object', type_='primary')
op.create_primary_key(
'pk_files_object', 'files_object', ['version_id'])
op.create_unique_constraint(
'uq_files_object_bucket_id', 'files_object',
['bucket_id', 'version_id', 'key'])
# table ObjectVersionTag: create
op.create_table(
'files_objecttags',
sa.Column(
'version_id',
sqlalchemy_utils.types.uuid.UUIDType(),
nullable=False),
sa.Column(
'key',
sa.Text().with_variant(mysql.VARCHAR(255), 'mysql'),
nullable=False
),
sa.Column(
'value',
sa.Text().with_variant(mysql.VARCHAR(255), 'mysql'),
nullable=False
),
sa.PrimaryKeyConstraint('version_id', 'key'),
sa.ForeignKeyConstraint(
['version_id'],
[u'files_object.version_id'],
ondelete='CASCADE'),
) | Upgrade database. |
def data2schema(
_data=None, _force=False, _besteffort=True, _registry=None,
_factory=None, _buildkwargs=None, **kwargs
):
"""Get the schema able to instanciate input data.
The default value of schema will be data.
Can be used as a decorator:
..code-block:: python
@data2schema
def example(): pass # return a function schema
@data2schema(_registry=myregistry)
def example(): pass # return a function schema with specific registry
..warning::
returns this function if _data is None.
:param _data: data possibly generated by a schema. Required except when used
as a decorator.
:param bool _force: if True (False by default), create the data schema
on the fly if it does not exist.
:param bool _besteffort: if True (default), find a schema class able to
validate data class by inheritance.
:param SchemaRegistry _registry: default registry to use. Global by
default.
:param SchemaFactory factory: default factory to use. Global by default.
:param dict _buildkwargs: factory builder kwargs.
:param kwargs: schema class kwargs.
:return: Schema.
:rtype: Schema.
"""
if _data is None:
# forward the decorator keyword arguments to the deferred call
return lambda _data: data2schema(
_data, _force=_force, _besteffort=_besteffort, _registry=_registry,
_factory=_factory, _buildkwargs=_buildkwargs, **kwargs
)
result = None
fdata = _data() if isinstance(_data, DynamicValue) else _data
datatype = type(fdata)
content = getattr(fdata, '__dict__', {})
if _buildkwargs:
content.update(_buildkwargs)
schemacls = datatype2schemacls(
_datatype=datatype, _registry=_registry, _factory=_factory,
_force=_force, _besteffort=_besteffort, **content
)
if schemacls is not None:
result = schemacls(default=_data, **kwargs)
for attrname in dir(_data):
if not hasattr(schemacls, attrname):
attr = getattr(_data, attrname)
if attr is not None:
setattr(result, attrname, attr)
if result is None and _data is None:
result = AnySchema()
return result | Get the schema able to instanciate input data.
The default value of schema will be data.
Can be used as a decorator:
..code-block:: python
@data2schema
def example(): pass # return a function schema
@data2schema(_registry=myregistry)
def example(): pass # return a function schema with specific registry
..warning::
returns this function if _data is None.
:param _data: data possibly generated by a schema. Required except when used
as a decorator.
:param bool _force: if True (False by default), create the data schema
on the fly if it does not exist.
:param bool _besteffort: if True (default), find a schema class able to
validate data class by inheritance.
:param SchemaRegistry _registry: default registry to use. Global by
default.
:param SchemaFactory factory: default factory to use. Global by default.
:param dict _buildkwargs: factory builder kwargs.
:param kwargs: schema class kwargs.
:return: Schema.
:rtype: Schema. |
def save_thumbnail(image_path_template, src_file, file_conf, gallery_conf):
"""Generate and Save the thumbnail image
Parameters
----------
image_path_template : str
holds the template where to save and how to name the image
src_file : str
path to source python file
gallery_conf : dict
Sphinx-Gallery configuration dictionary
"""
# read specification of the figure to display as thumbnail from main text
thumbnail_number = file_conf.get('thumbnail_number', 1)
if not isinstance(thumbnail_number, int):
raise TypeError(
'sphinx_gallery_thumbnail_number setting is not a number, '
'got %r' % (thumbnail_number,))
thumbnail_image_path, ext = _find_image_ext(image_path_template,
thumbnail_number)
thumb_dir = os.path.join(os.path.dirname(thumbnail_image_path), 'thumb')
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
base_image_name = os.path.splitext(os.path.basename(src_file))[0]
thumb_file = os.path.join(thumb_dir,
'sphx_glr_%s_thumb.%s' % (base_image_name, ext))
if src_file in gallery_conf['failing_examples']:
img = os.path.join(glr_path_static(), 'broken_example.png')
elif os.path.exists(thumbnail_image_path):
img = thumbnail_image_path
elif not os.path.exists(thumb_file):
# create something to replace the thumbnail
img = os.path.join(glr_path_static(), 'no_image.png')
img = gallery_conf.get("default_thumb_file", img)
else:
return
if ext == 'svg':
copyfile(img, thumb_file)
else:
scale_image(img, thumb_file, *gallery_conf["thumbnail_size"]) | Generate and Save the thumbnail image
Parameters
----------
image_path_template : str
holds the template where to save and how to name the image
src_file : str
path to source python file
gallery_conf : dict
Sphinx-Gallery configuration dictionary |
def _parse_lti_data(self, courseid, taskid):
""" Verify and parse the data for the LTI basic launch """
post_input = web.webapi.rawinput("POST")
self.logger.debug('_parse_lti_data:' + str(post_input))
try:
course = self.course_factory.get_course(courseid)
except exceptions.CourseNotFoundException as ex:
raise web.notfound(str(ex))
try:
test = LTIWebPyToolProvider.from_webpy_request()
validator = LTIValidator(self.database.nonce, course.lti_keys())
verified = test.is_valid_request(validator)
except Exception:
self.logger.exception("...")
self.logger.info("Error while validating LTI request for %s", str(post_input))
raise web.forbidden(_("Error while validating LTI request"))
if verified:
self.logger.debug('parse_lti_data for %s', str(post_input))
user_id = post_input["user_id"]
roles = post_input.get("roles", "Student").split(",")
realname = self._find_realname(post_input)
email = post_input.get("lis_person_contact_email_primary", "")
lis_outcome_service_url = post_input.get("lis_outcome_service_url", None)
outcome_result_id = post_input.get("lis_result_sourcedid", None)
consumer_key = post_input["oauth_consumer_key"]
if course.lti_send_back_grade():
if lis_outcome_service_url is None or outcome_result_id is None:
self.logger.info('Error: lis_outcome_service_url is None but lti_send_back_grade is True')
raise web.forbidden(_("In order to send grade back to the TC, INGInious needs the parameters lis_outcome_service_url and "
"lis_outcome_result_id in the LTI basic-launch-request. Please contact your administrator."))
else:
lis_outcome_service_url = None
outcome_result_id = None
tool_name = post_input.get('tool_consumer_instance_name', 'N/A')
tool_desc = post_input.get('tool_consumer_instance_description', 'N/A')
tool_url = post_input.get('tool_consumer_instance_url', 'N/A')
context_title = post_input.get('context_title', 'N/A')
context_label = post_input.get('context_label', 'N/A')
session_id = self.user_manager.create_lti_session(user_id, roles, realname, email, courseid, taskid, consumer_key,
lis_outcome_service_url, outcome_result_id, tool_name, tool_desc, tool_url,
context_title, context_label)
loggedin = self.user_manager.attempt_lti_login()
return session_id, loggedin
else:
self.logger.info("Couldn't validate LTI request")
raise web.forbidden(_("Couldn't validate LTI request")) | Verify and parse the data for the LTI basic launch |
def get_tables(self):
""" Returns a collection of this worksheet's tables"""
url = self.build_url(self._endpoints.get('get_tables'))
response = self.session.get(url)
if not response:
return []
data = response.json()
return [self.table_constructor(parent=self, **{self._cloud_data_key: table})
for table in data.get('value', [])] | Returns a collection of this worksheet's tables |
def _parse_hello_extensions(data):
"""
Creates a generator returning tuples of information about each extension
from a byte string of extension data contained in a ServerHello or
ClientHello message
:param data:
A byte string of extension data from a TLS ServerHello or ClientHello
message
:return:
A generator that yields 2-element tuples:
[0] Byte string of extension type
[1] Byte string of extension data
"""
if data == b'':
return
extentions_length = int_from_bytes(data[0:2])
extensions_start = 2
extensions_end = 2 + extentions_length
pointer = extensions_start
while pointer < extensions_end:
extension_type = int_from_bytes(data[pointer:pointer + 2])
extension_length = int_from_bytes(data[pointer + 2:pointer + 4])
yield (
extension_type,
data[pointer + 4:pointer + 4 + extension_length]
)
pointer += 4 + extension_length | Creates a generator returning tuples of information about each extension
from a byte string of extension data contained in a ServerHello or
ClientHello message
:param data:
A byte string of extension data from a TLS ServerHello or ClientHello
message
:return:
A generator that yields 2-element tuples:
[0] Byte string of extension type
[1] Byte string of extension data |
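A rough usage sketch of the generator above. It assumes the module's int_from_bytes helper reads big-endian unsigned integers, and it hand-builds a fake extension block containing a single extension.

blob = (
    b'\x00\x06'   # total length of all extensions (6 bytes)
    b'\x00\x0b'   # extension type (11)
    b'\x00\x02'   # extension data length
    b'\x01\x00'   # extension data
)
for ext_type, ext_data in _parse_hello_extensions(blob):
    print(ext_type, ext_data)   # -> 11 b'\x01\x00'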
def read_config_file(self, file_name):
"""
Reads a CWR grammar config file.
:param file_name: name of the text file
:return: the file's contents
"""
with open(os.path.join(self.__path(), os.path.basename(file_name)),
'rt') as file_config:
return self._parser.parseString(file_config.read()) | Reads a CWR grammar config file.
:param file_name: name of the text file
:return: the file's contents |
def spin(self):
""" Perform a single spin """
for x in self.spinchars:
self.string = self.msg + "...\t" + x + "\r"
self.out.write(self.string.encode('utf-8'))
self.out.flush()
time.sleep(self.waittime) | Perform a single spin |
def check_chunks(n_samples, n_features, chunks=None):
"""Validate and normalize the chunks argument for a dask.array
Parameters
----------
n_samples, n_features : int
Give the shape of the array
chunks : int, sequence, optional, default None
* For 'chunks=None', this picks a "good" default number of chunks based
on the number of CPU cores. The default results in a block structure
with one block per core along the first dimension (of roughly equal
lengths) and a single block along the second dimension. This may or
may not be appropriate for your use-case. The chunk size will be at
least 100 along the first dimension.
* When chunks is an int, we split the ``n_samples`` into ``chunks``
blocks along the first dimension, and a single block along the
second. Again, the chunksize will be at least 100 along the first
dimension.
* When chunks is a sequence, we validate that its length is two and turn
it into a tuple.
Returns
-------
chunks : tuple
"""
if chunks is None:
chunks = (max(100, n_samples // cpu_count()), n_features)
elif isinstance(chunks, Integral):
chunks = (max(100, n_samples // chunks), n_features)
elif isinstance(chunks, Sequence):
chunks = tuple(chunks)
if len(chunks) != 2:
raise AssertionError("Chunks should be a 2-tuple.")
else:
raise ValueError("Unknown type of chunks: '{}'".format(type(chunks)))
return chunks | Validate and normalize the chunks argument for a dask.array
Parameters
----------
n_samples, n_features : int
Give the shape of the array
chunks : int, sequence, optional, default None
* For 'chunks=None', this picks a "good" default number of chunks based
on the number of CPU cores. The default results in a block structure
with one block per core along the first dimension (of roughly equal
lengths) and a single block along the second dimension. This may or
may not be appropriate for your use-case. The chunk size will be at
least 100 along the first dimension.
* When chunks is an int, we split the ``n_samples`` into ``chunks``
blocks along the first dimension, and a single block along the
second. Again, the chunksize will be at least 100 along the first
dimension.
* When chunks is a sequence, we validate that its length is two and turn
it into a tuple.
Returns
-------
chunks : tuple |
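A few illustrative calls showing the normalization rules described in the docstring (assumes the function and its imports, Integral/Sequence/cpu_count, are in scope).

print(check_chunks(10_000, 20, chunks=4))          # (2500, 20)
print(check_chunks(1_000, 20, chunks=200))         # (100, 20) -- floor of 100 rows per chunk
print(check_chunks(1_000, 20, chunks=(250, 20)))   # (250, 20)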
def add_spectra(self, spectra_dict, key_sort_func=None):
"""
Add a dictionary of spectra, with an optional sorting function for the
keys.
Args:
spectra_dict: dict of {label: Spectrum}
key_sort_func: function used to sort the spectra_dict keys.
"""
if key_sort_func:
keys = sorted(spectra_dict.keys(), key=key_sort_func)
else:
keys = spectra_dict.keys()
for label in keys:
self.add_spectrum(label, spectra_dict[label]) | Add a dictionary of spectra, with an optional sorting function for the
keys.
Args:
spectra_dict: dict of {label: Spectrum}
key_sort_func: function used to sort the spectra_dict keys. |
def knot_removal_alpha_j(u, degree, knotvector, num, idx):
""" Computes :math:`\\alpha_{j}` coefficient for knot removal algorithm.
Please refer to Eq. 5.29 of The NURBS Book by Piegl & Tiller, 2nd Edition, p.184 for details.
:param u: knot
:type u: float
:param degree: degree
:type degree: int
:param knotvector: knot vector
:type knotvector: tuple
:param num: knot removal index
:type num: int
:param idx: iterator index
:type idx: int
:return: coefficient value
:rtype: float
"""
return (u - knotvector[idx - num]) / (knotvector[idx + degree + 1] - knotvector[idx - num]) | Computes :math:`\\alpha_{j}` coefficient for knot removal algorithm.
Please refer to Eq. 5.29 of The NURBS Book by Piegl & Tiller, 2nd Edition, p.184 for details.
:param u: knot
:type u: float
:param degree: degree
:type degree: int
:param knotvector: knot vector
:type knotvector: tuple
:param num: knot removal index
:type num: int
:param idx: iterator index
:type idx: int
:return: coefficient value
:rtype: float |
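A small numeric check of the coefficient formula; the clamped cubic knot vector below is chosen arbitrarily for illustration.

kv = (0, 0, 0, 0, 0.25, 0.5, 0.75, 1, 1, 1, 1)   # clamped cubic knot vector
alpha = knot_removal_alpha_j(u=0.5, degree=3, knotvector=kv, num=0, idx=4)
print(alpha)   # (0.5 - 0.25) / (1 - 0.25) = 0.333...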
def get_end_trigger(options):
"""
When to end the optimization based on input option.
"""
if options.endTriggerType.lower() == "epoch":
return MaxEpoch(options.endTriggerNum)
else:
return MaxIteration(options.endTriggerNum) | When to end the optimization based on input option. |
def load_dependencies(req, history=None):
"""
Load the dependency tree as a Python object tree,
suitable for JSON serialization.
>>> deps = load_dependencies('jaraco.packaging')
>>> import json
>>> doc = json.dumps(deps)
"""
if history is None:
history = set()
dist = pkg_resources.get_distribution(req)
spec = dict(
requirement=str(req),
resolved=str(dist),
)
if req not in history:
# traverse into children
history.add(req)
extras = parse_extras(req)
depends = [
load_dependencies(dep, history=history)
for dep in dist.requires(extras=extras)
]
if depends:
spec.update(depends=depends)
return spec | Load the dependency tree as a Python object tree,
suitable for JSON serialization.
>>> deps = load_dependencies('jaraco.packaging')
>>> import json
>>> doc = json.dumps(deps) |
def coalesce_events(self, coalesce=True):
"""
Coalescing events. Events are usually processed in batches, whose size
depends on various factors. Thus, before processing them, events received
from inotify are aggregated in a FIFO queue. If this coalescing
option is enabled, events are filtered based on their uniqueness: only
unique events are enqueued, duplicates are discarded. An event is unique
when the combination of its fields (wd, mask, cookie, name) is unique
among events of the same batch. After a batch of events is processed, any
event is accepted again. By default this option is disabled; you have
to explicitly call this function to turn it on.
@param coalesce: Optional new coalescing value. True by default.
@type coalesce: Bool
"""
self._coalesce = coalesce
if not coalesce:
self._eventset.clear() | Coalescing events. Events are usually processed in batches, whose size
depends on various factors. Thus, before processing them, events received
from inotify are aggregated in a FIFO queue. If this coalescing
option is enabled, events are filtered based on their uniqueness: only
unique events are enqueued, duplicates are discarded. An event is unique
when the combination of its fields (wd, mask, cookie, name) is unique
among events of the same batch. After a batch of events is processed, any
event is accepted again. By default this option is disabled; you have
to explicitly call this function to turn it on.
@param coalesce: Optional new coalescing value. True by default.
@type coalesce: Bool |
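The coalescing behaviour described above boils down to per-batch deduplication on the (wd, mask, cookie, name) tuple; the stand-alone sketch below is not pyinotify code, just an illustration of that idea.

def coalesce_batch(raw_events):
    # keep only the first occurrence of each (wd, mask, cookie, name) combination
    seen = set()
    batch = []
    for ev in raw_events:
        key = (ev['wd'], ev['mask'], ev['cookie'], ev['name'])
        if key not in seen:
            seen.add(key)
            batch.append(ev)
    return batch

events = [
    {'wd': 1, 'mask': 0x100, 'cookie': 0, 'name': 'a.txt'},
    {'wd': 1, 'mask': 0x100, 'cookie': 0, 'name': 'a.txt'},   # duplicate, dropped
    {'wd': 1, 'mask': 0x200, 'cookie': 0, 'name': 'a.txt'},
]
print(len(coalesce_batch(events)))   # 2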
def Update(self):
"""Commit current PublicIP definition to cloud.
Usually called by the class to commit changes to port and source restriction policies.
>>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0].Update().WaitUntilComplete()
0
"""
return(clc.v2.Requests(clc.v2.API.Call('PUT','servers/%s/%s/publicIPAddresses/%s' % (self.parent.server.alias,self.parent.server.id,self.id),
json.dumps({'ports': [o.ToDict() for o in self.ports],
'sourceRestrictions': [o.ToDict() for o in self.source_restrictions] }),
session=self.session),
alias=self.parent.server.alias,
session=self.session)) | Commit current PublicIP definition to cloud.
Usually called by the class to commit changes to port and source restriction policies.
>>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0].Update().WaitUntilComplete()
0 |
def read_file(self):
"""
Grabs filename and enables it to be read.
:return: raw_file = unaltered text; file_lines = text split by lines.
"""
with open(self.filename, mode='r+', encoding='utf8') as text_file:
self.raw_file = text_file.read() # pylint: disable= attribute-defined-outside-init
self.file_lines = [x.rstrip() for x in self.raw_file.splitlines()] | Grabs filename and enables it to be read.
:return: raw_file = unaltered text; file_lines = text split by lines. |
def iter_by_year(self):
"""Split the return objects by year and iterate"""
for yr, details in self.txn_details.iter_by_year():
yield yr, Performance(details) | Split the return objects by year and iterate |
def writeToDelimitedString(obj, stream=None):
"""
Stanford CoreNLP uses the Java "writeDelimitedTo" function, which
writes the size (and offset) of the buffer before writing the object.
This function writes the varint-encoded size of @obj followed by its
serialized bytes to @stream (creating a BytesIO if none is given).
@returns the stream that was written to.
"""
if stream is None:
stream = BytesIO()
_EncodeVarint(stream.write, obj.ByteSize(), True)
stream.write(obj.SerializeToString())
return stream | Stanford CoreNLP uses the Java "writeDelimitedTo" function, which
writes the size (and offset) of the buffer before writing the object.
This function writes the varint-encoded size of @obj followed by its
serialized bytes to @stream (creating a BytesIO if none is given).
@returns the stream that was written to. |
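A sketch of the length-prefixed framing this helper produces, using a hand-rolled varint encoder in place of protobuf's private _EncodeVarint; the payload bytes are a stand-in for a serialized message.

from io import BytesIO

def encode_varint(value):
    # minimal unsigned varint encoder (stand-in for protobuf's _EncodeVarint)
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)

payload = b'serialized-protobuf-bytes'   # stand-in for obj.SerializeToString()
stream = BytesIO()
stream.write(encode_varint(len(payload)))   # length prefix
stream.write(payload)                       # message body
print(stream.getvalue()[:1])                # b'\x19' == 25, the payload length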
def parse_sentence(obj: dict) -> BioCSentence:
"""Deserialize a dict obj to a BioCSentence object"""
sentence = BioCSentence()
sentence.offset = obj['offset']
sentence.infons = obj['infons']
sentence.text = obj['text']
for annotation in obj['annotations']:
sentence.add_annotation(parse_annotation(annotation))
for relation in obj['relations']:
sentence.add_relation(parse_relation(relation))
return sentence | Deserialize a dict obj to a BioCSentence object |
def rdn_to_dn(changes: Changeset, name: str, base_dn: str) -> Changeset:
""" Convert the rdn to a fully qualified DN for the specified LDAP
connection.
:param changes: The changes object to lookup.
:param name: rdn to convert.
:param base_dn: The base_dn to lookup.
:return: fully qualified DN.
"""
dn = changes.get_value_as_single('dn')
if dn is not None:
return changes
value = changes.get_value_as_single(name)
if value is None:
raise tldap.exceptions.ValidationError(
"Cannot use %s in dn as it is None" % name)
if isinstance(value, list):
raise tldap.exceptions.ValidationError(
"Cannot use %s in dn as it is a list" % name)
assert base_dn is not None
split_base = str2dn(base_dn)
split_new_dn = [[(name, value, 1)]] + split_base
new_dn = dn2str(split_new_dn)
return changes.set('dn', new_dn) | Convert the rdn to a fully qualified DN for the specified LDAP
connection.
:param changes: The changes object to lookup.
:param name: rdn to convert.
:param base_dn: The base_dn to lookup.
:return: fully qualified DN. |
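The DN composition done above, shown with plain string handling; the real code uses the LDAP library's str2dn/dn2str, which also handle escaping, and the attribute/base DN values here are made up.

name, value = 'uid', 'jsmith'                      # hypothetical rdn attribute/value
base_dn = 'ou=People,dc=example,dc=com'            # hypothetical base DN
new_dn = '%s=%s,%s' % (name, value, base_dn)       # prepend the new RDN to the base
print(new_dn)                                      # uid=jsmith,ou=People,dc=example,dc=com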
def main():
"""
Main function, called when run as an application.
"""
global args, server_address
# parse the command line arguments
parser = ArgumentParser(description=__doc__)
parser.add_argument(
"host", nargs='?',
help="address of host (default %r)" % (SERVER_HOST,),
default=SERVER_HOST,
)
parser.add_argument(
"port", nargs='?', type=int,
help="server port (default %r)" % (SERVER_PORT,),
default=SERVER_PORT,
)
parser.add_argument(
"--hello", action="store_true",
default=False,
help="send a hello message",
)
parser.add_argument(
"--connect-timeout", nargs='?', type=int,
help="initial connection timeout",
default=CONNECT_TIMEOUT,
)
parser.add_argument(
"--idle-timeout", nargs='?', type=int,
help="idle connection timeout",
default=IDLE_TIMEOUT,
)
args = parser.parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# extract the server address and port
host = args.host
port = args.port
server_address = (host, port)
if _debug: _log.debug(" - server_address: %r", server_address)
# build the stack
this_console = ConsoleClient()
if _debug: _log.debug(" - this_console: %r", this_console)
this_middle_man = MiddleMan()
if _debug: _log.debug(" - this_middle_man: %r", this_middle_man)
this_director = TCPClientDirector(
connect_timeout=args.connect_timeout,
idle_timeout=args.idle_timeout,
)
if _debug: _log.debug(" - this_director: %r", this_director)
bind(this_console, this_middle_man, this_director)
bind(MiddleManASE(), this_director)
# create a task manager for scheduled functions
task_manager = TaskManager()
if _debug: _log.debug(" - task_manager: %r", task_manager)
# don't wait to connect
deferred(this_director.connect, server_address)
# send hello maybe
if args.hello:
deferred(this_middle_man.indication, PDU(b'Hello, world!\n'))
if _debug: _log.debug("running")
run()
if _debug: _log.debug("fini") | Main function, called when run as an application. |
def get_cdd_hdd_candidate_models(
data,
minimum_non_zero_cdd,
minimum_non_zero_hdd,
minimum_total_cdd,
minimum_total_hdd,
beta_cdd_maximum_p_value,
beta_hdd_maximum_p_value,
weights_col,
):
""" Return a list of candidate cdd_hdd models for a particular selection
of cooling balance point and heating balance point
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns each of the form ``hdd_<heating_balance_point>``
and ``cdd_<cooling_balance_point>``. DataFrames of this form can be
made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_non_zero_hdd : :any:`int`
Minimum allowable number of non-zero heating degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
minimum_total_hdd : :any:`float`
Minimum allowable total sum of heating degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
beta_hdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta hdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
A list of cdd_hdd candidate models, with any associated warnings.
"""
cooling_balance_points = [
int(col[4:]) for col in data.columns if col.startswith("cdd")
]
heating_balance_points = [
int(col[4:]) for col in data.columns if col.startswith("hdd")
]
# CalTrack 3.2.2.1
candidate_models = [
get_single_cdd_hdd_candidate_model(
data,
minimum_non_zero_cdd,
minimum_non_zero_hdd,
minimum_total_cdd,
minimum_total_hdd,
beta_cdd_maximum_p_value,
beta_hdd_maximum_p_value,
weights_col,
cooling_balance_point,
heating_balance_point,
)
for cooling_balance_point in cooling_balance_points
for heating_balance_point in heating_balance_points
if heating_balance_point <= cooling_balance_point
]
return candidate_models | Return a list of candidate cdd_hdd models for a particular selection
of cooling balance point and heating balance point
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns each of the form ``hdd_<heating_balance_point>``
and ``cdd_<cooling_balance_point>``. DataFrames of this form can be
made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_non_zero_hdd : :any:`int`
Minimum allowable number of non-zero heating degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
minimum_total_hdd : :any:`float`
Minimum allowable total sum of heating degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
beta_hdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta hdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
A list of cdd_hdd candidate models, with any associated warnings. |
def handle_authorized(self, event):
"""Send the initial presence after log-in."""
request_software_version(self.client, self.target_jid,
self.success, self.failure) | Send the initial presence after log-in. |
def ffill(arr, dim=None, limit=None):
'''forward fill missing values'''
import bottleneck as bn
axis = arr.get_axis_num(dim)
# work around for bottleneck 178
_limit = limit if limit is not None else arr.shape[axis]
return apply_ufunc(bn.push, arr,
dask='parallelized',
keep_attrs=True,
output_dtypes=[arr.dtype],
kwargs=dict(n=_limit, axis=axis)).transpose(*arr.dims) | forward fill missing values |
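What bottleneck.push does underneath the xarray wrapper above (assumes numpy and bottleneck are installed).

import numpy as np
import bottleneck as bn

a = np.array([1.0, np.nan, np.nan, 4.0, np.nan])
print(bn.push(a, n=1))   # [ 1.  1. nan  4.  4.]  -- each value pushed at most one step
print(bn.push(a))        # [ 1.  1.  1.  4.  4.]  -- unlimited forward fill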
def get_urlpatterns(self):
""" Returns the URL patterns managed by the considered factory / application. """
return [
path(
_('topic/<str:slug>-<int:pk>/lock/'),
self.topic_lock_view.as_view(),
name='topic_lock',
),
path(
_('topic/<str:slug>-<int:pk>/unlock/'),
self.topic_unlock_view.as_view(),
name='topic_unlock',
),
path(
_('topic/<str:slug>-<int:pk>/delete/'),
self.topic_delete_view.as_view(),
name='topic_delete',
),
path(
_('topic/<str:slug>-<int:pk>/move/'),
self.topic_move_view.as_view(),
name='topic_move',
),
path(
_('topic/<str:slug>-<int:pk>/change/topic/'),
self.topic_update_to_normal_topic_view.as_view(),
name='topic_update_to_post',
),
path(
_('topic/<str:slug>-<int:pk>/change/sticky/'),
self.topic_update_to_sticky_topic_view.as_view(),
name='topic_update_to_sticky',
),
path(
_('topic/<str:slug>-<int:pk>/change/announce/'),
self.topic_update_to_announce_view.as_view(),
name='topic_update_to_announce',
),
path(_('queue/'), self.moderation_queue_list_view.as_view(), name='queue'),
path(
_('queue/<int:pk>/'),
self.moderation_queue_detail_view.as_view(),
name='queued_post',
),
path(
_('queue/<int:pk>/approve/'),
self.post_approve_view.as_view(),
name='approve_queued_post',
),
path(
_('queue/<int:pk>/disapprove/'),
self.post_disapprove_view.as_view(),
name='disapprove_queued_post',
),
] | Returns the URL patterns managed by the considered factory / application. |
def UpdateValues(self):
"""Update all displayed values"""
# This sends an event to the grid table
# to update all of the values
msg = wx.grid.GridTableMessage(self,
wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)
self.grid.ProcessTableMessage(msg) | Update all displayed values |
def _Execute(statements, context, callback, trace):
"""Execute a bunch of template statements in a ScopedContext.
Args:
callback: Strings are "written" to this callback function.
trace: Trace object, or None
This is called in a mutually recursive fashion.
"""
# Every time we call _Execute, increase this depth
if trace:
trace.exec_depth += 1
for i, statement in enumerate(statements):
if isinstance(statement, six.string_types):
callback(statement)
else:
# In the case of a substitution, args is a pair (name, formatters).
# In the case of a section, it's a _Section instance.
try:
func, args = statement
func(args, context, callback, trace)
except UndefinedVariable as e:
# Show context for statements
start = max(0, i - 3)
end = i + 3
e.near = statements[start:end]
e.trace = trace # Attach caller's trace (could be None)
raise | Execute a bunch of template statements in a ScopedContext.
Args:
callback: Strings are "written" to this callback function.
trace: Trace object, or None
This is called in a mutually recursive fashion. |
def get_page_template(self, **kwargs):
"""Return the template name used for this request.
Only called if *page_template* is not given as a kwarg of
*self.as_view*.
"""
opts = self.object_list.model._meta
return '{0}/{1}{2}{3}.html'.format(
opts.app_label,
opts.object_name.lower(),
self.template_name_suffix,
self.page_template_suffix,
) | Return the template name used for this request.
Only called if *page_template* is not given as a kwarg of
*self.as_view*. |
def get_flat(self):
"""Gets the weights and returns them as a flat array.
Returns:
1D Array containing the flattened weights.
"""
self._check_sess()
return np.concatenate([
v.eval(session=self.sess).flatten()
for v in self.variables.values()
]) | Gets the weights and returns them as a flat array.
Returns:
1D Array containing the flattened weights. |
def macronize_tags(self, text):
"""Return macronized form along with POS tags.
E.g. "Gallia est omnis divisa in partes tres," ->
[('gallia', 'n-s---fb-', 'galliā'), ('est', 'v3spia---', 'est'), ('omnis', 'a-s---mn-', 'omnis'),
('divisa', 't-prppnn-', 'dīvīsa'), ('in', 'r--------', 'in'), ('partes', 'n-p---fa-', 'partēs'),
('tres', 'm--------', 'trēs')]
:param text: raw text
:return: tuples of head word, tag, macronized form
:rtype : list
"""
return [self._macronize_word(word) for word in self._retrieve_tag(text)] | Return macronized form along with POS tags.
E.g. "Gallia est omnis divisa in partes tres," ->
[('gallia', 'n-s---fb-', 'galliā'), ('est', 'v3spia---', 'est'), ('omnis', 'a-s---mn-', 'omnis'),
('divisa', 't-prppnn-', 'dīvīsa'), ('in', 'r--------', 'in'), ('partes', 'n-p---fa-', 'partēs'),
('tres', 'm--------', 'trēs')]
:param text: raw text
:return: tuples of head word, tag, macronized form
:rtype : list |
def disconnect(self, mol):
"""Break covalent bonds between metals and organic atoms under certain conditions.
The algorithm works as follows:
- Disconnect N, O, F from any metal.
- Disconnect other non-metals from transition metals + Al (but not Hg, Ga, Ge, In, Sn, As, Tl, Pb, Bi, Po).
- For every bond broken, adjust the charges of the begin and end atoms accordingly.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: The molecule with metals disconnected.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running MetalDisconnector')
# Remove bonds that match SMARTS
for smarts in [self._metal_nof, self._metal_non]:
pairs = mol.GetSubstructMatches(smarts)
rwmol = Chem.RWMol(mol)
orders = []
for i, j in pairs:
# TODO: Could get the valence contributions of the bond instead of GetBondTypeAsDouble?
orders.append(int(mol.GetBondBetweenAtoms(i, j).GetBondTypeAsDouble()))
rwmol.RemoveBond(i, j)
# Adjust neighbouring charges accordingly
mol = rwmol.GetMol()
for n, (i, j) in enumerate(pairs):
chg = orders[n]
atom1 = mol.GetAtomWithIdx(i)
atom1.SetFormalCharge(atom1.GetFormalCharge() + chg)
atom2 = mol.GetAtomWithIdx(j)
atom2.SetFormalCharge(atom2.GetFormalCharge() - chg)
log.info('Removed covalent bond between %s and %s', atom1.GetSymbol(), atom2.GetSymbol())
Chem.SanitizeMol(mol)
return mol | Break covalent bonds between metals and organic atoms under certain conditions.
The algorithm works as follows:
- Disconnect N, O, F from any metal.
- Disconnect other non-metals from transition metals + Al (but not Hg, Ga, Ge, In, Sn, As, Tl, Pb, Bi, Po).
- For every bond broken, adjust the charges of the begin and end atoms accordingly.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: The molecule with metals disconnected.
:rtype: rdkit.Chem.rdchem.Mol |
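A condensed, hand-driven illustration of the bond-breaking and charge-adjustment step, applied to covalently drawn sodium acetate; the real class selects bonds via its _metal_nof/_metal_non SMARTS patterns, whereas the SMARTS here is written inline and RDKit is assumed to be installed.

from rdkit import Chem

mol = Chem.MolFromSmiles('CC(=O)O[Na]')              # covalently drawn sodium acetate
i, j = mol.GetSubstructMatches(Chem.MolFromSmarts('[O]~[Na]'))[0]
order = int(mol.GetBondBetweenAtoms(i, j).GetBondTypeAsDouble())
rwmol = Chem.RWMol(mol)
rwmol.RemoveBond(i, j)                               # break the O-Na bond
out = rwmol.GetMol()
out.GetAtomWithIdx(i).SetFormalCharge(out.GetAtomWithIdx(i).GetFormalCharge() - order)
out.GetAtomWithIdx(j).SetFormalCharge(out.GetAtomWithIdx(j).GetFormalCharge() + order)
Chem.SanitizeMol(out)
print(Chem.MolToSmiles(out))                         # e.g. 'CC(=O)[O-].[Na+]'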
def apply_time_offset(time, years=0, months=0, days=0, hours=0):
"""Apply a specified offset to the given time array.
This is useful for GFDL model output of instantaneous values. For example,
3 hourly data postprocessed to netCDF files spanning 1 year each will
actually have time values that are offset by 3 hours, such that the first
value is for 1 Jan 03:00 and the last value is 1 Jan 00:00 of the
subsequent year. This causes problems in xarray, e.g. when trying to group
by month. It is resolved by manually subtracting off those three hours,
such that the dates span from 1 Jan 00:00 to 31 Dec 21:00 as desired.
Parameters
----------
time : xarray.DataArray representing a timeseries
years, months, days, hours : int, optional
The number of years, months, days, and hours, respectively, to offset
the time array by. Positive values move the times later.
Returns
-------
pandas.DatetimeIndex
Examples
--------
Case of a length-1 input time array:
>>> times = xr.DataArray(datetime.datetime(1899, 12, 31, 21))
>>> apply_time_offset(times)
Timestamp('1900-01-01 00:00:00')
Case of input time array with length greater than one:
>>> times = xr.DataArray([datetime.datetime(1899, 12, 31, 21),
... datetime.datetime(1899, 1, 31, 21)])
>>> apply_time_offset(times) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['1900-01-01', '1899-02-01'], dtype='datetime64[ns]',
freq=None)
"""
return (pd.to_datetime(time.values) +
pd.DateOffset(years=years, months=months, days=days, hours=hours)) | Apply a specified offset to the given time array.
This is useful for GFDL model output of instantaneous values. For example,
3 hourly data postprocessed to netCDF files spanning 1 year each will
actually have time values that are offset by 3 hours, such that the first
value is for 1 Jan 03:00 and the last value is 1 Jan 00:00 of the
subsequent year. This causes problems in xarray, e.g. when trying to group
by month. It is resolved by manually subtracting off those three hours,
such that the dates span from 1 Jan 00:00 to 31 Dec 21:00 as desired.
Parameters
----------
time : xarray.DataArray representing a timeseries
years, months, days, hours : int, optional
The number of years, months, days, and hours, respectively, to offset
the time array by. Positive values move the times later.
Returns
-------
pandas.DatetimeIndex
Examples
--------
Case of a length-1 input time array:
>>> times = xr.DataArray(datetime.datetime(1899, 12, 31, 21))
>>> apply_time_offset(times)
Timestamp('1900-01-01 00:00:00')
Case of input time array with length greater than one:
>>> times = xr.DataArray([datetime.datetime(1899, 12, 31, 21),
... datetime.datetime(1899, 1, 31, 21)])
>>> apply_time_offset(times) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['1900-01-01', '1899-02-01'], dtype='datetime64[ns]',
freq=None) |
def _filter_defs_at_call_sites(self, defs):
"""
If we are not tracing into the functions that are called in a real execution, we should properly filter the defs
to account for the behavior of the skipped function at this call site.
This function is a WIP. See TODOs inside.
:param defs:
:return:
"""
# TODO: make definition killing architecture independent and calling convention independent
# TODO: use information from a calling convention analysis
filtered_defs = LiveDefinitions()
for variable, locs in defs.items():
if isinstance(variable, SimRegisterVariable):
if self.project.arch.name == 'X86':
if variable.reg in (self.project.arch.registers['eax'][0],
self.project.arch.registers['ecx'][0],
self.project.arch.registers['edx'][0]):
continue
filtered_defs.add_defs(variable, locs)
return filtered_defs | If we are not tracing into the function that are called in a real execution, we should properly filter the defs
to account for the behavior of the skipped function at this call site.
This function is a WIP. See TODOs inside.
:param defs:
:return: |
def _serialize_input_list(input_value):
"""Recursively serialize task input list"""
input_list = []
for item in input_value:
if isinstance(item, list):
input_list.append(Task._serialize_input_list(item))
else:
if isinstance(item, File):
item = Task._to_api_file_format(item)
input_list.append(item)
return input_list | Recursively serialize task input list |
def get_ssm_parameter(parameter_name):
'''
Get the decrypted value of an SSM parameter
Args:
parameter_name - the name of the stored parameter of interest
Return:
Value if allowed and present else None
'''
try:
response = boto3.client('ssm').get_parameters(
Names=[parameter_name],
WithDecryption=True
)
return response.get('Parameters', None)[0].get('Value', '')
except Exception:
pass
return '' | Get the decrypted value of an SSM parameter
Args:
parameter_name - the name of the stored parameter of interest
Return:
Value if allowed and present else None |
def device_status(self):
"""Return the status of the device as string."""
try:
return self.device_status_simple(
self.data.get('status').get('status1'))
except (KeyError, AttributeError):
return self.device_status_simple('') | Return the status of the device as string. |
def mutect_to_bed(df):
"""Convert MuTect results (read into dataframe) to BedTool object
Parameters
----------
df : pandas.DataFrame
Pandas DataFrame with MuTect results.
Returns
-------
bt : pybedtools.BedTool
BedTool with variants.
"""
s = (df.contig.astype(str) + '\t' +
(df.position - 1).astype(int).astype(str) + '\t' +
df.position.astype(int).astype(str) + '\t' +
df.tumor_name)
s = '\n'.join(s.values) + '\n'
bt = pbt.BedTool(s, from_string=True)
return bt | Convert MuTect results (read into dataframe) to BedTool object
Parameters
----------
df : pandas.DataFrame
Pandas DataFrame with MuTect results.
Returns
-------
bt : pybedtools.BedTool
BedTool with variants. |
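A minimal usage sketch; the column names follow the MuTect call-stats layout the function expects, the values are invented, and pybedtools (with the bedtools binary) is assumed to be available.

import pandas as pd

df = pd.DataFrame({
    'contig': ['chr1', 'chr2'],
    'position': [12345, 67890],
    'tumor_name': ['sampleA', 'sampleA'],
})
bt = mutect_to_bed(df)
print(bt)   # chr1  12344  12345  sampleA / chr2  67889  67890  sampleA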
def handle_assignattr_type(node, parent):
"""handle an astroid.assignattr node
handle instance_attrs_type
"""
try:
values = set(node.infer())
current = set(parent.instance_attrs_type[node.attrname])
parent.instance_attrs_type[node.attrname] = list(current | values)
except astroid.InferenceError:
pass | handle an astroid.assignattr node
handle instance_attrs_type |
def get_ref_dir(region, coordsys):
""" Finds and returns the reference direction for a given
HEALPix region string.
region : a string describing a HEALPix region
coordsys : coordinate system, GAL | CEL
"""
if region is None:
if coordsys == "GAL":
c = SkyCoord(0., 0., frame=Galactic, unit="deg")
elif coordsys == "CEL":
c = SkyCoord(0., 0., frame=ICRS, unit="deg")
return c
tokens = parse_hpxregion(region)
if tokens[0] in ['DISK', 'DISK_INC']:
if coordsys == "GAL":
c = SkyCoord(float(tokens[1]), float(
tokens[2]), frame=Galactic, unit="deg")
elif coordsys == "CEL":
c = SkyCoord(float(tokens[1]), float(
tokens[2]), frame=ICRS, unit="deg")
return c
elif tokens[0] == 'HPX_PIXEL':
nside_pix = int(tokens[2])
ipix_pix = int(tokens[3])
if tokens[1] == 'NESTED':
nest_pix = True
elif tokens[1] == 'RING':
nest_pix = False
else:
raise Exception(
"Did not recognize ordering scheme %s" % tokens[1])
theta, phi = hp.pix2ang(nside_pix, ipix_pix, nest_pix)
lat = np.degrees((np.pi / 2) - theta)
lon = np.degrees(phi)
if coordsys == "GAL":
c = SkyCoord(lon, lat, frame=Galactic, unit="deg")
elif coordsys == "CEL":
c = SkyCoord(lon, lat, frame=ICRS, unit="deg")
return c
else:
raise Exception(
"HPX.get_ref_dir did not recognize region type %s" % tokens[0])
return None | Finds and returns the reference direction for a given
HEALPix region string.
region : a string describing a HEALPix region
coordsys : coordinate system, GAL | CEL |
def figureStimulus(abf,sweeps=[0]):
"""
Create a plot of one area of interest of a single sweep.
"""
stimuli=[2.31250, 2.35270]
for sweep in sweeps:
abf.setsweep(sweep)
for stimulus in stimuli:
S1=int(abf.pointsPerSec*stimulus)
S2=int(abf.pointsPerSec*(stimulus+0.001)) # 1ms of blanking
abf.sweepY[S1:S2]=np.nan # blank out the stimulus area
I1=int(abf.pointsPerSec*2.2) # time point (sec) to start
I2=int(abf.pointsPerSec*2.6) # time point (sec) to end
baseline=np.average(abf.sweepY[int(abf.pointsPerSec*2.0):int(abf.pointsPerSec*2.2)])
Ys=lowPassFilter(abf.sweepY[I1:I2])-baseline
Xs=abf.sweepX2[I1:I1+len(Ys)].flatten()
plt.plot(Xs,Ys,alpha=.5,lw=2)
return | Create a plot of one area of interest of a single sweep. |
def _group_raw(self, raw_scores, cur=None, level=1):
"""
Internal recursive method to group raw scores into a cascading score summary.
Only top level items are tallied for scores.
@param list raw_scores: list of raw scores (Result objects)
"""
# BEGIN INTERNAL FUNCS ########################################
def trim_groups(r):
if isinstance(r.name, tuple) or isinstance(r.name, list):
new_name = r.name[1:]
else:
new_name = []
return Result(r.weight, r.value, new_name, r.msgs)
# CHECK FOR TERMINAL CONDITION: all raw_scores.name are single length
# @TODO could have a problem here with scalar name, but probably still works
terminal = [len(x.name) for x in raw_scores]
if terminal == [0] * len(raw_scores):
return []
def group_func(r):
"""
Takes a Result object and slices off the first element of its name
if it's a tuple. Otherwise, does nothing to the name. Returns the
Result's name and weight in a tuple to be used for sorting in that
order in a groupby function.
@param Result r
@return tuple (str, int)
"""
if isinstance(r.name, tuple) or isinstance(r.name, list):
if len(r.name) == 0:
retval = ''
else:
retval = r.name[0:1][0]
else:
retval = r.name
return retval, r.weight
# END INTERNAL FUNCS ##########################################
# NOTE until this point, *ALL* Results in raw_scores are
# individual Result objects.
# sort then group by name, then by priority weighting
grouped = itertools.groupby(sorted(raw_scores, key=group_func),
key=group_func)
# NOTE: post-grouping, grouped looks something like
# [(('Global Attributes', 1), <itertools._grouper at 0x7f10982b5390>),
# (('Global Attributes', 3), <itertools._grouper at 0x7f10982b5438>),
# (('Not a Global Attr', 1), <itertools._grouper at 0x7f10982b5470>)]
# (('Some Variable', 2), <itertools._grouper at 0x7f10982b5400>),
ret_val = []
for k, v in grouped: # iterate through the grouped tuples
k = k[0] # slice ("name", weight_val) --> "name"
v = list(v) # from itertools._grouper to list
cv = self._group_raw(list(map(trim_groups, v)), k, level + 1)
if len(cv):
# if this node has children, max weight of children + sum of all the scores
max_weight = max([x.weight for x in cv])
sum_scores = tuple(map(sum, list(zip(*([x.value for x in cv])))))
msgs = []
else:
max_weight = max([x.weight for x in v])
sum_scores = tuple(map(sum, list(zip(*([self._translate_value(x.value) for x in v])))))
msgs = sum([x.msgs for x in v], [])
ret_val.append(Result(name=k, weight=max_weight, value=sum_scores, children=cv, msgs=msgs))
return ret_val | Internal recursive method to group raw scores into a cascading score summary.
Only top level items are tallied for scores.
@param list raw_scores: list of raw scores (Result objects) |
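The grouping trick at the heart of the method, shown in isolation: sort by a key function, then itertools.groupby collapses adjacent items sharing that key. The Result fields and names below are invented for the illustration.

import itertools
from collections import namedtuple

Result = namedtuple('Result', 'name weight value')
raw = [
    Result(('Global Attributes', 'title'), 1, (1, 1)),
    Result(('Global Attributes', 'summary'), 3, (0, 1)),
    Result(('Some Variable', 'units'), 2, (1, 1)),
]

def group_key(r):
    # group on the first name component and the weight, as _group_raw does
    return (r.name[0], r.weight)

for (name, weight), group in itertools.groupby(sorted(raw, key=group_key), key=group_key):
    print(name, weight, len(list(group)))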
def _update_yaw_and_pitch(self):
"""
Updates the camera vectors based on the current yaw and pitch
"""
front = Vector3([0.0, 0.0, 0.0])
front.x = cos(radians(self.yaw)) * cos(radians(self.pitch))
front.y = sin(radians(self.pitch))
front.z = sin(radians(self.yaw)) * cos(radians(self.pitch))
self.dir = vector.normalise(front)
self.right = vector.normalise(vector3.cross(self.dir, self._up))
self.up = vector.normalise(vector3.cross(self.right, self.dir)) | Updates the camera vectors based on the current yaw and pitch |
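The same yaw/pitch-to-direction math with plain numpy, to show what the pyrr-based version computes (angles in degrees, +Y up; a sketch, not the class's API).

import numpy as np

def direction_from_yaw_pitch(yaw, pitch):
    # yaw/pitch in degrees, world up assumed to be +Y
    yaw, pitch = np.radians(yaw), np.radians(pitch)
    front = np.array([
        np.cos(yaw) * np.cos(pitch),
        np.sin(pitch),
        np.sin(yaw) * np.cos(pitch),
    ])
    return front / np.linalg.norm(front)

print(direction_from_yaw_pitch(-90.0, 0.0))   # ~[0, 0, -1]: looking down -Z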
def convert_type(self, type):
"""Convert type to SQL
"""
# Default dialect
mapping = {
'any': sa.Text,
'array': None,
'boolean': sa.Boolean,
'date': sa.Date,
'datetime': sa.DateTime,
'duration': None,
'geojson': None,
'geopoint': None,
'integer': sa.Integer,
'number': sa.Float,
'object': None,
'string': sa.Text,
'time': sa.Time,
'year': sa.Integer,
'yearmonth': None,
}
# Postgresql dialect
if self.__dialect == 'postgresql':
mapping.update({
'array': JSONB,
'geojson': JSONB,
'number': sa.Numeric,
'object': JSONB,
})
# Not supported type
if type not in mapping:
message = 'Field type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return mapping[type] | Convert type to SQL |
def optimize_branch_length(self, mode='joint', **kwargs):
"""
Perform optimization for the branch lengths of the entire tree.
This method only does a single path and needs to be iterated.
**Note** this method assumes that each node stores information
about its sequence as numpy.array object (node.sequence attribute).
Therefore, before calling this method, sequence reconstruction with
either of the available models must be performed.
Parameters
----------
mode : str
Optimize branch length assuming the joint ML sequence assignment
of both ends of the branch (:code:`joint`), or trace over all possible sequence
assignments on both ends of the branch (:code:`marginal`) (slower, experimental).
**kwargs :
Keyword arguments
Keyword Args
------------
verbose : int
Output level
store_old : bool
If True, the old lengths will be saved in :code:`node._old_dist` attribute.
Useful for testing, and special post-processing.
"""
self.logger("TreeAnc.optimize_branch_length: running branch length optimization in mode %s..."%mode,1)
if (self.tree is None) or (self.aln is None):
self.logger("TreeAnc.optimize_branch_length: ERROR, alignment or tree are missing", 0)
return ttconf.ERROR
store_old_dist = False
if 'store_old' in kwargs:
store_old_dist = kwargs['store_old']
if mode=='marginal':
# a marginal ancestral reconstruction is required for
# marginal branch length inference
if not hasattr(self.tree.root, "marginal_profile"):
self.infer_ancestral_sequences(marginal=True)
max_bl = 0
for node in self.tree.find_clades(order='postorder'):
if node.up is None: continue # this is the root
if store_old_dist:
node._old_length = node.branch_length
if mode=='marginal':
new_len = self.optimal_marginal_branch_length(node)
elif mode=='joint':
new_len = self.optimal_branch_length(node)
else:
self.logger("treeanc.optimize_branch_length: unsupported optimization mode",4, warn=True)
new_len = node.branch_length
if new_len < 0:
continue
self.logger("Optimization results: old_len=%.4e, new_len=%.4e, naive=%.4e"
" Updating branch length..."%(node.branch_length, new_len, len(node.mutations)*self.one_mutation), 5)
node.branch_length = new_len
node.mutation_length=new_len
max_bl = max(max_bl, new_len)
# as branch lengths changed, the params must be fixed
self.tree.root.up = None
self.tree.root.dist2root = 0.0
if max_bl>0.15 and mode=='joint':
self.logger("TreeAnc.optimize_branch_length: THIS TREE HAS LONG BRANCHES."
" \n\t ****TreeTime IS NOT DESIGNED TO OPTIMIZE LONG BRANCHES."
" \n\t ****PLEASE OPTIMIZE BRANCHES WITH ANOTHER TOOL AND RERUN WITH"
" \n\t ****branch_length_mode='input'", 0, warn=True)
self._prepare_nodes()
return ttconf.SUCCESS | Perform optimization for the branch lengths of the entire tree.
This method only does a single path and needs to be iterated.
**Note** this method assumes that each node stores information
about its sequence as numpy.array object (node.sequence attribute).
Therefore, before calling this method, sequence reconstruction with
either of the available models must be performed.
Parameters
----------
mode : str
Optimize branch length assuming the joint ML sequence assignment
of both ends of the branch (:code:`joint`), or trace over all possible sequence
assignments on both ends of the branch (:code:`marginal`) (slower, experimental).
**kwargs :
Keyword arguments
Keyword Args
------------
verbose : int
Output level
store_old : bool
If True, the old lengths will be saved in :code:`node._old_dist` attribute.
Useful for testing, and special post-processing. |
def x_axis_rotation(theta):
"""Generates a 3x3 rotation matrix for a rotation of angle
theta about the x axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix for a rotation of theta about the x axis.
"""
R = np.array([[1, 0, 0,],
[0, np.cos(theta), -np.sin(theta)],
[0, np.sin(theta), np.cos(theta)]])
return R | Generates a 3x3 rotation matrix for a rotation of angle
theta about the x axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix for a rotation of theta about the x axis. |
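A quick sanity check for the matrix above (assumes numpy and the function are in scope): rotating +Y by 90 degrees about X should give +Z, and the matrix should be orthogonal.

import numpy as np

R = x_axis_rotation(np.pi / 2)
print(np.round(R @ np.array([0.0, 1.0, 0.0]), 6))   # -> [0. 0. 1.]
print(np.round(R @ R.T, 6))                         # -> identity, so R is orthogonal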
def promote_deployment_groups(self, id, groups=list()):
""" This endpoint is used to promote task groups that have canaries for a deployment. This should be done when
the placed canaries are healthy and the rolling upgrade of the remaining allocations should begin.
https://www.nomadproject.io/docs/http/deployments.html
arguments:
- id
- groups, (list) Specifies a particular set of task groups that should be promoted
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
promote_groups_json = {"Groups": groups,
"DeploymentID": id}
return self.request("promote", id, json=promote_groups_json, method="post").json() | This endpoint is used to promote task groups that have canaries for a deployment. This should be done when
the placed canaries are healthy and the rolling upgrade of the remaining allocations should begin.
https://www.nomadproject.io/docs/http/deployments.html
arguments:
- id
- groups, (list) Specifies a particular set of task groups that should be promoted
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException |