def fixpointmethod(self, cfg_node):
"""The most important part of PyT, where we perform
the variant of reaching definitions to find where sources reach.
"""
JOIN = self.join(cfg_node)
# Assignment check
if isinstance(cfg_node, AssignmentNode):
arrow_result = JOIN
# Reassignment check
if cfg_node.left_hand_side not in cfg_node.right_hand_side_variables:
# Get previous assignments of cfg_node.left_hand_side and remove them from JOIN
arrow_result = self.arrow(JOIN, cfg_node.left_hand_side)
arrow_result = arrow_result | self.lattice.el2bv[cfg_node]
constraint_table[cfg_node] = arrow_result
# Default case
else:
        constraint_table[cfg_node] = JOIN
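# A minimal, hypothetical sketch of the join/arrow step above, using plain
# Python sets instead of PyT's bit-vector lattice (node and variable names
# are invented for illustration, not taken from PyT).
def reaching_definitions_step(predecessor_defs, reassigned_variable, current_node):
    # JOIN: union of the definitions reaching this node from all predecessors.
    join = set().union(*predecessor_defs) if predecessor_defs else set()
    # "arrow": kill earlier assignments to the variable being reassigned.
    survivors = {d for d in join if d[0] != reassigned_variable}
    # Add the current assignment itself.
    return survivors | {current_node}

# Two predecessors both define 'x'; reassigning 'x' at node ('x', 3) kills the
# older definitions and keeps only the new one plus ('y', 2).
preds = [{('x', 1)}, {('x', 2), ('y', 2)}]
print(reaching_definitions_step(preds, 'x', ('x', 3)))  # {('y', 2), ('x', 3)}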
def animate(self, animation, static, score, best, appear):
"""Handle animation."""
# Create a surface of static parts in the animation.
surface = pygame.Surface((self.game_width, self.game_height), 0)
surface.fill(self.BACKGROUND)
# Draw all static tiles.
for y in range(self.COUNT_Y):
for x in range(self.COUNT_X):
x1, y1 = self.get_tile_location(x, y)
x1 -= self.origin[0]
y1 -= self.origin[1]
surface.blit(self.tiles[static.get((x, y), 0)], (x1, y1))
# Pygame clock for FPS control.
clock = pygame.time.Clock()
if score:
score_label = self.label_font.render('+%d' % score, True, (119, 110, 101))
w1, h1 = score_label.get_size()
if best:
best_label = self.label_font.render('+%d' % best, True, (119, 110, 101))
w2, h2 = best_label.get_size()
# Loop through every frame.
for frame in range(self.ANIMATION_FRAMES):
        # Limit to 60 fps.
clock.tick(60)
# Pump events.
pygame.event.pump()
self.screen.blit(surface, self.origin)
# Calculate animation progress.
dt = (frame + 0.) / self.ANIMATION_FRAMES
for tile in animation:
self.screen.blit(self.tiles[tile.value], tile.get_position(dt))
        # Scale tile size with the square root of progress so the tile area grows roughly linearly.
scale = dt ** 0.5
w, h = int(self.cell_width * scale) & ~1, int(self.cell_height * scale) & ~1
for x, y, value in appear:
self.screen.blit(self._scale_tile(value, w, h),
self._center_tile(self.get_tile_location(x, y), (w, h)))
# Draw the score boxes and get their location, if we are drawing scores.
if best or score:
(x1, y1), (x2, y2), w, h = self.draw_scores()
if score:
self.screen.blit(score_label, (x1 + (w - w1) / 2, y1 + (h - h1) / 2 - dt * h))
if best:
self.screen.blit(best_label, (x2 + (w - w2) / 2, y2 + (h - h2) / 2 - dt * h))
        pygame.display.flip()
def compute(self, inputVector, learn, activeArray, applyLateralInhibition=True):
"""
This is the primary public method of the LateralPooler class. This
    function takes an input vector and outputs the indices of the active columns.
If 'learn' is set to True, this method also updates the permanences of the
columns and their lateral inhibitory connection weights.
"""
if not isinstance(inputVector, np.ndarray):
raise TypeError("Input vector must be a numpy array, not %s" %
str(type(inputVector)))
if inputVector.size != self._numInputs:
raise ValueError(
"Input vector dimensions don't match. Expecting %s but got %s" % (
inputVector.size, self._numInputs))
self._updateBookeepingVars(learn)
inputVector = np.array(inputVector, dtype=realDType)
    inputVector = inputVector.reshape(-1)
self._overlaps = self._calculateOverlap(inputVector)
# Apply boosting when learning is on
if learn:
self._boostedOverlaps = self._boostFactors * self._overlaps
else:
self._boostedOverlaps = self._overlaps
# Apply inhibition to determine the winning columns
    if applyLateralInhibition:
activeColumns = self._inhibitColumnsWithLateral(self._boostedOverlaps, self.lateralConnections)
else:
activeColumns = self._inhibitColumns(self._boostedOverlaps)
activeArray.fill(0)
activeArray[activeColumns] = 1.0
if learn:
self._adaptSynapses(inputVector, activeColumns, self._boostedOverlaps)
self._updateDutyCycles(self._overlaps, activeColumns)
self._bumpUpWeakColumns()
self._updateBoostFactors()
self._updateAvgActivityPairs(activeArray)
epsilon = self.lateralLearningRate
if epsilon > 0:
self._updateLateralConnections(epsilon, self.avgActivityPairs)
if self._isUpdateRound():
self._updateInhibitionRadius()
self._updateMinDutyCycles()
    return activeArray
def get_page(self, index=None):
"""Return page widget"""
if index is None:
widget = self.pages_widget.currentWidget()
else:
widget = self.pages_widget.widget(index)
    return widget.widget()
def _adaptSynapses(self, inputVector, activeColumns, synPermActiveInc, synPermInactiveDec):
"""
The primary method in charge of learning. Adapts the permanence values of
the synapses based on the input vector, and the chosen columns after
inhibition round. Permanence values are increased for synapses connected to
input bits that are turned on, and decreased for synapses connected to
inputs bits that are turned off.
Parameters:
----------------------------
@param inputVector:
A numpy array of 0's and 1's that comprises the input to
the spatial pooler. There exists an entry in the array
for every input bit.
@param activeColumns:
An array containing the indices of the columns that
survived inhibition.
@param synPermActiveInc:
Permanence increment for active inputs
@param synPermInactiveDec:
Permanence decrement for inactive inputs
"""
inputIndices = numpy.where(inputVector > 0)[0]
permChanges = numpy.zeros(self.getNumInputs(), dtype=REAL_DTYPE)
permChanges.fill(-1 * synPermInactiveDec)
permChanges[inputIndices] = synPermActiveInc
perm = numpy.zeros(self.getNumInputs(), dtype=REAL_DTYPE)
potential = numpy.zeros(self.getNumInputs(), dtype=REAL_DTYPE)
for i in activeColumns:
self.getPermanence(i, perm)
self.getPotential(i, potential)
maskPotential = numpy.where(potential > 0)[0]
perm[maskPotential] += permChanges[maskPotential]
        self._updatePermanencesForColumn(perm, i, raisePerm=False)
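# A numpy-only sketch of the permanence-update arithmetic above, outside the
# SpatialPooler class; all values below are invented for illustration.
import numpy

input_vector = numpy.array([1, 0, 1, 0, 0])
syn_perm_active_inc = 0.05
syn_perm_inactive_dec = 0.02

perm_changes = numpy.full(input_vector.size, -syn_perm_inactive_dec)
perm_changes[numpy.where(input_vector > 0)[0]] = syn_perm_active_inc

perm = numpy.array([0.30, 0.10, 0.50, 0.20, 0.40])  # one column's permanences
potential = numpy.array([1, 1, 1, 0, 1])            # that column's potential pool
mask = numpy.where(potential > 0)[0]
perm[mask] += perm_changes[mask]
print(perm)  # approximately [0.35, 0.08, 0.55, 0.20, 0.38]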
def to_array(tensor): # type: (TensorProto) -> np.ndarray[Any]
"""Converts a tensor def object to a numpy array.
Inputs:
tensor: a TensorProto object.
Returns:
arr: the converted array.
"""
if tensor.HasField("segment"):
raise ValueError(
"Currently not supporting loading segments.")
if tensor.data_type == TensorProto.UNDEFINED:
raise ValueError("The data type is not defined.")
tensor_dtype = tensor.data_type
np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[tensor_dtype]
storage_type = mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[tensor_dtype]
storage_np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[storage_type]
storage_field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[storage_type]
dims = tensor.dims
if tensor.data_type == TensorProto.STRING:
utf8_strings = getattr(tensor, storage_field)
ss = list(s.decode('utf-8') for s in utf8_strings)
return np.asarray(ss).astype(np_dtype).reshape(dims)
if tensor.HasField("raw_data"):
# Raw_bytes support: using frombuffer.
return np.frombuffer(
tensor.raw_data,
dtype=np_dtype).reshape(dims)
else:
        data = getattr(tensor, storage_field)  # type: Sequence[np.complex64]
if (tensor_dtype == TensorProto.COMPLEX64
or tensor_dtype == TensorProto.COMPLEX128):
data = combine_pairs_to_complex(data)
return (
np.asarray(
data,
dtype=storage_np_dtype)
.astype(np_dtype)
.reshape(dims)
        )
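# A hedged usage sketch for to_array, assuming the standard onnx helpers
# (onnx.helper.make_tensor and onnx.TensorProto) are available; to_array itself
# also relies on onnx's internal `mapping` tables imported elsewhere in the module.
import onnx
from onnx import TensorProto

tensor = onnx.helper.make_tensor(
    name="example",
    data_type=TensorProto.FLOAT,
    dims=[2, 2],
    vals=[1.0, 2.0, 3.0, 4.0],
)
arr = to_array(tensor)
print(arr.dtype, arr.shape)  # float32 (2, 2)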
def calculate_incorrect_name_dict(graph: BELGraph) -> Mapping[str, str]:
"""Group all of the incorrect identifiers in a dict of {namespace: list of erroneous names}.
:return: A dictionary of {namespace: list of erroneous names}
"""
missing = defaultdict(list)
for _, e, ctx in graph.warnings:
if not isinstance(e, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning)):
continue
missing[e.namespace].append(e.name)
    return dict(missing)
def delta(feat, N):
"""Compute delta features from a feature vector sequence.
:param feat: A numpy array of size (NUMFRAMES by number of features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A numpy array of size (NUMFRAMES by number of features) containing delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = len(feat)
denominator = 2 * sum([i**2 for i in range(1, N+1)])
delta_feat = numpy.empty_like(feat)
padded = numpy.pad(feat, ((N, N), (0, 0)), mode='edge') # padded version of feat
for t in range(NUMFRAMES):
delta_feat[t] = numpy.dot(numpy.arange(-N, N+1), padded[t : t+2*N+1]) / denominator # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
    return delta_feat
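# Small usage sketch for delta(): three 2-dimensional feature frames with one
# neighbouring frame (N=1) on each side.
import numpy
feat = numpy.array([[1.0, 10.0],
                    [2.0, 20.0],
                    [3.0, 30.0]])
print(delta(feat, 1))
# Edge padding repeats the first/last frame, so the boundary rows are halved:
# [[0.5  5.], [1. 10.], [0.5  5.]]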
def log_level_from_string(str_level):
""" Returns the proper log level core based on a given string
:param str_level: Log level string
:return: The log level code
"""
levels = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
}
try:
return levels[str_level.upper()]
except KeyError:
pass
except AttributeError:
if str_level in [logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL]:
return str_level
    return logging.NOTSET
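# Quick usage sketch: strings are mapped case-insensitively, existing level
# codes pass through unchanged, and anything unknown falls back to NOTSET.
import logging
assert log_level_from_string('debug') == logging.DEBUG
assert log_level_from_string(logging.ERROR) == logging.ERROR
assert log_level_from_string('nonsense') == logging.NOTSET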
def extend_schema(schema, documentAST=None):
"""Produces a new schema given an existing schema and a document which may
contain GraphQL type extensions and definitions. The original schema will
remain unaltered.
Because a schema represents a graph of references, a schema cannot be
extended without effectively making an entire copy. We do not know until it's
too late if subgraphs remain unchanged.
This algorithm copies the provided schema, applying extensions while
producing the copy. The original schema remains unaltered."""
assert isinstance(schema, GraphQLSchema), "Must provide valid GraphQLSchema"
assert documentAST and isinstance(
documentAST, ast.Document
), "Must provide valid Document AST"
# Collect the type definitions and extensions found in the document.
type_definition_map = {}
type_extensions_map = defaultdict(list)
for _def in documentAST.definitions:
if isinstance(
_def,
(
ast.ObjectTypeDefinition,
ast.InterfaceTypeDefinition,
ast.EnumTypeDefinition,
ast.UnionTypeDefinition,
ast.ScalarTypeDefinition,
ast.InputObjectTypeDefinition,
),
):
# Sanity check that none of the defined types conflict with the
# schema's existing types.
type_name = _def.name.value
if schema.get_type(type_name):
raise GraphQLError(
(
'Type "{}" already exists in the schema. It cannot also '
+ "be defined in this type definition."
).format(type_name),
[_def],
)
type_definition_map[type_name] = _def
elif isinstance(_def, ast.TypeExtensionDefinition):
# Sanity check that this type extension exists within the
# schema's existing types.
extended_type_name = _def.definition.name.value
existing_type = schema.get_type(extended_type_name)
if not existing_type:
raise GraphQLError(
(
'Cannot extend type "{}" because it does not '
+ "exist in the existing schema."
).format(extended_type_name),
[_def.definition],
)
if not isinstance(existing_type, GraphQLObjectType):
raise GraphQLError(
'Cannot extend non-object type "{}".'.format(extended_type_name),
[_def.definition],
)
type_extensions_map[extended_type_name].append(_def)
# Below are functions used for producing this schema that have closed over
# this scope and have access to the schema, cache, and newly defined types.
def get_type_from_def(type_def):
type = _get_named_type(type_def.name)
assert type, "Invalid schema"
return type
def get_type_from_AST(astNode):
type = _get_named_type(astNode.name.value)
if not type:
raise GraphQLError(
(
'Unknown type: "{}". Ensure that this type exists '
+ "either in the original schema, or is added in a type definition."
).format(astNode.name.value),
[astNode],
)
return type
# Given a name, returns a type from either the existing schema or an
# added type.
def _get_named_type(typeName):
cached_type_def = type_def_cache.get(typeName)
if cached_type_def:
return cached_type_def
existing_type = schema.get_type(typeName)
if existing_type:
type_def = extend_type(existing_type)
type_def_cache[typeName] = type_def
return type_def
type_ast = type_definition_map.get(typeName)
if type_ast:
type_def = build_type(type_ast)
type_def_cache[typeName] = type_def
return type_def
# Given a type's introspection result, construct the correct
# GraphQLType instance.
def extend_type(type):
if isinstance(type, GraphQLObjectType):
return extend_object_type(type)
if isinstance(type, GraphQLInterfaceType):
return extend_interface_type(type)
if isinstance(type, GraphQLUnionType):
return extend_union_type(type)
return type
def extend_object_type(type):
return GraphQLObjectType(
name=type.name,
description=type.description,
interfaces=lambda: extend_implemented_interfaces(type),
fields=lambda: extend_field_map(type),
)
def extend_interface_type(type):
return GraphQLInterfaceType(
name=type.name,
description=type.description,
fields=lambda: extend_field_map(type),
resolve_type=cannot_execute_client_schema,
)
def extend_union_type(type):
return GraphQLUnionType(
name=type.name,
description=type.description,
types=list(map(get_type_from_def, type.types)),
resolve_type=cannot_execute_client_schema,
)
def extend_implemented_interfaces(type):
interfaces = list(map(get_type_from_def, type.interfaces))
# If there are any extensions to the interfaces, apply those here.
extensions = type_extensions_map[type.name]
for extension in extensions:
for namedType in extension.definition.interfaces:
interface_name = namedType.name.value
if any([_def.name == interface_name for _def in interfaces]):
raise GraphQLError(
(
'Type "{}" already implements "{}". '
+ "It cannot also be implemented in this type extension."
).format(type.name, interface_name),
[namedType],
)
interfaces.append(get_type_from_AST(namedType))
return interfaces
def extend_field_map(type):
new_field_map = OrderedDict()
old_field_map = type.fields
for field_name, field in old_field_map.items():
new_field_map[field_name] = GraphQLField(
extend_field_type(field.type),
description=field.description,
deprecation_reason=field.deprecation_reason,
args=field.args,
resolver=cannot_execute_client_schema,
)
# If there are any extensions to the fields, apply those here.
extensions = type_extensions_map[type.name]
for extension in extensions:
for field in extension.definition.fields:
field_name = field.name.value
if field_name in old_field_map:
raise GraphQLError(
(
'Field "{}.{}" already exists in the '
+ "schema. It cannot also be defined in this type extension."
).format(type.name, field_name),
[field],
)
new_field_map[field_name] = GraphQLField(
build_field_type(field.type),
args=build_input_values(field.arguments),
resolver=cannot_execute_client_schema,
)
return new_field_map
def extend_field_type(type):
if isinstance(type, GraphQLList):
return GraphQLList(extend_field_type(type.of_type))
if isinstance(type, GraphQLNonNull):
return GraphQLNonNull(extend_field_type(type.of_type))
return get_type_from_def(type)
def build_type(type_ast):
_type_build = {
ast.ObjectTypeDefinition: build_object_type,
ast.InterfaceTypeDefinition: build_interface_type,
ast.UnionTypeDefinition: build_union_type,
ast.ScalarTypeDefinition: build_scalar_type,
ast.EnumTypeDefinition: build_enum_type,
ast.InputObjectTypeDefinition: build_input_object_type,
}
func = _type_build.get(type(type_ast))
if func:
return func(type_ast)
def build_object_type(type_ast):
return GraphQLObjectType(
type_ast.name.value,
interfaces=lambda: build_implemented_interfaces(type_ast),
fields=lambda: build_field_map(type_ast),
)
def build_interface_type(type_ast):
return GraphQLInterfaceType(
type_ast.name.value,
fields=lambda: build_field_map(type_ast),
resolve_type=cannot_execute_client_schema,
)
def build_union_type(type_ast):
return GraphQLUnionType(
type_ast.name.value,
types=list(map(get_type_from_AST, type_ast.types)),
resolve_type=cannot_execute_client_schema,
)
def build_scalar_type(type_ast):
return GraphQLScalarType(
type_ast.name.value,
serialize=lambda *args, **kwargs: None,
# Note: validation calls the parse functions to determine if a
# literal value is correct. Returning null would cause use of custom
# scalars to always fail validation. Returning false causes them to
# always pass validation.
parse_value=lambda *args, **kwargs: False,
parse_literal=lambda *args, **kwargs: False,
)
def build_enum_type(type_ast):
return GraphQLEnumType(
type_ast.name.value,
values={v.name.value: GraphQLEnumValue() for v in type_ast.values},
)
def build_input_object_type(type_ast):
return GraphQLInputObjectType(
type_ast.name.value,
fields=lambda: build_input_values(type_ast.fields, GraphQLInputObjectField),
)
def build_implemented_interfaces(type_ast):
return list(map(get_type_from_AST, type_ast.interfaces))
def build_field_map(type_ast):
return {
field.name.value: GraphQLField(
build_field_type(field.type),
args=build_input_values(field.arguments),
resolver=cannot_execute_client_schema,
)
for field in type_ast.fields
}
def build_input_values(values, input_type=GraphQLArgument):
input_values = OrderedDict()
for value in values:
type = build_field_type(value.type)
input_values[value.name.value] = input_type(
type, default_value=value_from_ast(value.default_value, type)
)
return input_values
def build_field_type(type_ast):
if isinstance(type_ast, ast.ListType):
return GraphQLList(build_field_type(type_ast.type))
if isinstance(type_ast, ast.NonNullType):
return GraphQLNonNull(build_field_type(type_ast.type))
return get_type_from_AST(type_ast)
# If this document contains no new types, then return the same unmodified
# GraphQLSchema instance.
if not type_extensions_map and not type_definition_map:
return schema
# A cache to use to store the actual GraphQLType definition objects by name.
# Initialize to the GraphQL built in scalars and introspection types. All
# functions below are inline so that this type def cache is within the scope
# of the closure.
type_def_cache = {
"String": GraphQLString,
"Int": GraphQLInt,
"Float": GraphQLFloat,
"Boolean": GraphQLBoolean,
"ID": GraphQLID,
"__Schema": __Schema,
"__Directive": __Directive,
"__DirectiveLocation": __DirectiveLocation,
"__Type": __Type,
"__Field": __Field,
"__InputValue": __InputValue,
"__EnumValue": __EnumValue,
"__TypeKind": __TypeKind,
}
# Get the root Query, Mutation, and Subscription types.
query_type = get_type_from_def(schema.get_query_type())
existing_mutation_type = schema.get_mutation_type()
mutationType = (
existing_mutation_type and get_type_from_def(existing_mutation_type) or None
)
existing_subscription_type = schema.get_subscription_type()
subscription_type = (
existing_subscription_type
and get_type_from_def(existing_subscription_type)
or None
)
# Iterate through all types, getting the type definition for each, ensuring
# that any type not directly referenced by a field will get created.
types = [get_type_from_def(_def) for _def in schema.get_type_map().values()]
# Do the same with new types, appending to the list of defined types.
types += [get_type_from_AST(_def) for _def in type_definition_map.values()]
# Then produce and return a Schema with these types.
return GraphQLSchema(
query=query_type,
mutation=mutationType,
subscription=subscription_type,
# Copy directives.
directives=schema.get_directives(),
types=types,
    )
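# A hedged usage sketch, assuming graphql-core 2.x style top-level imports
# (names and module paths may differ between graphql-core versions).
from graphql import parse, GraphQLSchema, GraphQLObjectType, GraphQLField, GraphQLString

query_type = GraphQLObjectType('Query', fields={'hello': GraphQLField(GraphQLString)})
schema = GraphQLSchema(query=query_type)
extended = extend_schema(schema, parse('extend type Query { world: String }'))
print('world' in extended.get_query_type().fields)  # True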
def _get_normalized_args(parser):
"""Return the parsed command line arguments.
Support the case when executed from a shebang, where all the
parameters come in sys.argv[1] in a single string separated
by spaces (in this case, the third parameter is what is being
executed)
"""
env = os.environ
    if '_' in env and env['_'] != sys.argv[0] and len(sys.argv) > 1 and " " in sys.argv[1]:
return parser.parse_args(shlex.split(sys.argv[1]) + sys.argv[2:])
else:
        return parser.parse_args()
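# Illustration of the shebang case handled above: when invoked via a shebang,
# the user's flags can arrive bundled in sys.argv[1] as a single string, and
# shlex.split recovers the individual tokens before they are re-parsed.
import shlex
print(shlex.split("--verbose --timeout 30"))  # ['--verbose', '--timeout', '30']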
def reload_config(self, asynchronous=True, verbose=False):
    '''
    Initiate a config reload. This may take a while on large installations.
    '''
    # If we're using an API version older than 4.5.0, don't reload asynchronously
    api_version = float(self.api_version()['api_version'])
    if api_version < 4.5:
        asynchronous = False
    url = '{}/{}{}'.format(
        self.rest_url, 'reload', '?asynchronous=1' if asynchronous else ''
    )
    return self.__auth_req_post(url, verbose=verbose)
def get_user_contact_lists_contacts(self, id, contact_list_id, **data):
"""
GET /users/:id/contact_lists/:contact_list_id/contacts/
Returns the :format:`contacts <contact>` on the contact list
as ``contacts``.
"""
return self.get("/users/{0}/contact_lists/{0}/contacts/".format(id,contact_list_id), data=data) | GET /users/:id/contact_lists/:contact_list_id/contacts/
Returns the :format:`contacts <contact>` on the contact list
as ``contacts``. |
def empty(cls: Type[BoardT], *, chess960: bool = False) -> BoardT:
"""Creates a new empty board. Also see :func:`~chess.Board.clear()`."""
    return cls(None, chess960=chess960)
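# A hedged usage sketch with python-chess (API names assumed from that
# library): start from an empty board and place a single piece.
import chess

board = chess.Board.empty()
board.set_piece_at(chess.E4, chess.Piece(chess.KING, chess.WHITE))
print(board.fen())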
def coordinate(self, panes=[], index=0):
"""
Update pane coordinate tuples based on their height and width relative to other panes
within the dimensions of the current window.
We account for panes with a height of 1 where the bottom coordinates are the same as the top.
Account for floating panes and self-coordinating panes adjacent to panes set to EXPAND.
Coordinates are of the form:
[
((top-left-from-top, top-left-from-left),
(top-right-from-top, top-right-from-left)),
((bottom-left-from-top, bottom-left-from-left),
(bottom-right-from-top, bottom-right-from-left))
]
We can then use these to determine things such as whether corners are inverted and how
many characters may be drawn
"""
y = 0 # height
for i, element in enumerate(self.panes):
x = 0 # width
if isinstance(element, list):
current_height = 0
for j, pane in enumerate(element):
if pane.hidden: continue
current_width = pane.width
current_height = pane.height
upper = ((y, x), (y, x+current_width))
lower = ((y+(current_height if current_height > 1 else 0), x),
(y+(current_height if current_height > 1 else 0), x+current_width))
pane.coords = [upper, lower]
x += current_width
y += (current_height+1 if current_height > 1 else 1)
else:
if element.hidden: continue
current_width = element.width
current_height = element.height
upper = ((y, x), (y, x+current_width))
lower = ((y+(current_height if current_height > 1 else 0), x),
(y+(current_height if current_height > 1 else 0), x+current_width))
element.coords = [upper, lower]
y += (current_height+1 if current_height > 1 else 1)
if self.debug:
coordinates = "Coordinates: " + str([p.coords for p in self])
if len(coordinates) > self.width:
coordinates = coordinates[:self.width - 3]
coordinates += '...'
        self.addstr(self.height-3, 0, coordinates)
def _is_dynamic(v: Var) -> bool:
"""Return True if the Var holds a value which should be compiled to a dynamic
Var access."""
return (
Maybe(v.meta)
.map(lambda m: m.get(SYM_DYNAMIC_META_KEY, None)) # type: ignore
.or_else_get(False)
    )
def as_create_table(self, table_name, overwrite=False):
"""Reformats the query into the create table as query.
    Works only for single SELECT SQL statements; in all other cases
    the sql query is not modified.
    :param superset_query: string, sql query that will be executed
    :param table_name: string, will contain the results of the
        query execution
    :param overwrite: boolean, if true the table table_name will be dropped first
:return: string, create table as query
"""
exec_sql = ''
sql = self.stripped()
if overwrite:
exec_sql = f'DROP TABLE IF EXISTS {table_name};\n'
exec_sql += f'CREATE TABLE {table_name} AS \n{sql}'
    return exec_sql
def _format_vector(self, vecs, form='broadcast'):
"""
Format a 3d vector field in certain ways, see `coords` for a description
of each formatting method.
"""
if form == 'meshed':
return np.meshgrid(*vecs, indexing='ij')
elif form == 'vector':
vecs = np.meshgrid(*vecs, indexing='ij')
return np.rollaxis(np.array(np.broadcast_arrays(*vecs)),0,self.dim+1)
elif form == 'flat':
return vecs
else:
        return [v[self._coord_slicers[i]] for i,v in enumerate(vecs)]
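# A numpy-only illustration of the 'meshed' and 'vector' forms referenced
# above for a 2-d field; the class attributes (self.dim, self._coord_slicers)
# are not needed for this sketch.
import numpy as np
vecs = [np.arange(3), np.arange(2)]
meshed = np.meshgrid(*vecs, indexing='ij')                          # two (3, 2) arrays
vector = np.rollaxis(np.array(np.broadcast_arrays(*meshed)), 0, 3)  # shape (3, 2, 2)
print(meshed[0].shape, vector.shape)  # (3, 2) (3, 2, 2)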
def focus_window(winhandle, path=None, name=None, sleeptime=.01):
"""
sudo apt-get install xautomation
apt-get install autokey-gtk
wmctrl -xa gnome-terminal.Gnome-terminal
wmctrl -xl
"""
import utool as ut
import time
print('focus: ' + winhandle)
args = ['wmctrl', '-xa', winhandle]
ut.cmd(*args, verbose=False, quiet=True)
    time.sleep(sleeptime)
def _next_state(index, event_time, transition_set, population_view):
"""Moves a population between different states using information from a `TransitionSet`.
Parameters
----------
index : iterable of ints
An iterable of integer labels for the simulants.
event_time : pandas.Timestamp
When this transition is occurring.
transition_set : TransitionSet
A set of potential transitions available to the simulants.
population_view : vivarium.framework.population.PopulationView
A view of the internal state of the simulation.
"""
if len(transition_set) == 0 or index.empty:
return
outputs, decisions = transition_set.choose_new_state(index)
groups = _groupby_new_state(index, outputs, decisions)
if groups:
for output, affected_index in sorted(groups, key=lambda x: str(x[0])):
if output == 'null_transition':
pass
elif isinstance(output, Transient):
if not isinstance(output, State):
raise ValueError('Invalid transition output: {}'.format(output))
output.transition_effect(affected_index, event_time, population_view)
output.next_state(affected_index, event_time, population_view)
elif isinstance(output, State):
output.transition_effect(affected_index, event_time, population_view)
else:
                raise ValueError('Invalid transition output: {}'.format(output))
def message(self, value):
"""
Setter for **self.__message** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) in (unicode, QString), \
"'{0}' attribute: '{1}' type is not 'unicode' or 'QString'!".format("message", value)
        self.__message = value
def guess_message_type(message):
"""
Guess the message type based on the class of message
:param message: Message to guess the type for
:type message: APPMessage
:return: The corresponding message type (MsgType) or None if not found
:rtype: None | int
"""
if isinstance(message, APPConfigMessage):
return MsgType.CONFIG
elif isinstance(message, APPJoinMessage):
return MsgType.JOIN
elif isinstance(message, APPDataMessage):
# All inheriting from this first !!
return MsgType.DATA
elif isinstance(message, APPUpdateMessage):
return MsgType.UPDATE
elif isinstance(message, APPUnjoinMessage):
return MsgType.UNJOIN
# APPMessage -> ACK?
    return None
def pp_prep(self, mlt_df):
""" prepare pilot point based parameterizations
Parameters
----------
mlt_df : pandas.DataFrame
a dataframe with multiplier array information
Note
----
calls pyemu.pp_utils.setup_pilot_points_grid()
"""
if len(self.pp_props) == 0:
return
if self.pp_space is None:
self.logger.warn("pp_space is None, using 10...\n")
self.pp_space=10
if self.pp_geostruct is None:
self.logger.warn("pp_geostruct is None,"\
" using ExpVario with contribution=1 and a=(pp_space*max(delr,delc))")
pp_dist = self.pp_space * float(max(self.m.dis.delr.array.max(),
self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0,a=pp_dist)
self.pp_geostruct = pyemu.geostats.GeoStruct(variograms=v)
pp_df = mlt_df.loc[mlt_df.suffix==self.pp_suffix,:]
layers = pp_df.layer.unique()
pp_dict = {l:list(pp_df.loc[pp_df.layer==l,"prefix"].unique()) for l in layers}
# big assumption here - if prefix is listed more than once, use the lowest layer index
for i,l in enumerate(layers):
p = set(pp_dict[l])
for ll in layers[i+1:]:
pp = set(pp_dict[ll])
d = pp - p
pp_dict[ll] = list(d)
pp_array_file = {p:m for p,m in zip(pp_df.prefix,pp_df.mlt_file)}
self.logger.statement("pp_dict: {0}".format(str(pp_dict)))
self.log("calling setup_pilot_point_grid()")
if self.use_pp_zones:
# check if k_zone_dict is a dictionary of dictionaries
if np.all([isinstance(v, dict) for v in self.k_zone_dict.values()]):
ib = {p.split('.')[-1]: k_dict for p, k_dict in self.k_zone_dict.items()}
for attr in pp_df.attr_name.unique():
if attr not in [p.split('.')[-1] for p in ib.keys()]:
if 'general_zn' not in ib.keys():
warnings.warn("Dictionary of dictionaries passed as zones, {0} not in keys: {1}. "
"Will use ibound for zones".format(attr, ib.keys()), PyemuWarning)
else:
self.logger.statement(
"Dictionary of dictionaries passed as pp zones, "
"using 'general_zn' for {0}".format(attr))
if 'general_zn' not in ib.keys():
ib['general_zn'] = {k: self.m.bas6.ibound[k].array for k in range(self.m.nlay)}
else:
ib = {'general_zn': self.k_zone_dict}
else:
ib = {}
for k in range(self.m.nlay):
a = self.m.bas6.ibound[k].array.copy()
a[a>0] = 1
ib[k] = a
for k,i in ib.items():
if np.any(i<0):
u,c = np.unique(i[i>0], return_counts=True)
counts = dict(zip(u,c))
mx = -1.0e+10
imx = None
for u,c in counts.items():
if c > mx:
mx = c
imx = u
self.logger.warn("resetting negative ibound values for PP zone"+ \
"array in layer {0} : {1}".format(k+1,u))
i[i<0] = u
ib = {'general_zn': ib}
pp_df = pyemu.pp_utils.setup_pilotpoints_grid(self.m,
ibound=ib,
use_ibound_zones=self.use_pp_zones,
prefix_dict=pp_dict,
every_n_cell=self.pp_space,
pp_dir=self.m.model_ws,
tpl_dir=self.m.model_ws,
shapename=os.path.join(
self.m.model_ws,"pp.shp"))
self.logger.statement("{0} pilot point parameters created".
format(pp_df.shape[0]))
self.logger.statement("pilot point 'pargp':{0}".
format(','.join(pp_df.pargp.unique())))
self.log("calling setup_pilot_point_grid()")
# calc factors for each layer
pargp = pp_df.pargp.unique()
pp_dfs_k = {}
fac_files = {}
pp_processed = set()
pp_df.loc[:,"fac_file"] = np.NaN
for pg in pargp:
ks = pp_df.loc[pp_df.pargp==pg,"k"].unique()
if len(ks) == 0:
self.logger.lraise("something is wrong in fac calcs for par group {0}".format(pg))
if len(ks) == 1:
if np.all([isinstance(v, dict) for v in ib.values()]): # check is dict of dicts
if np.any([pg.startswith(p) for p in ib.keys()]):
p = next(p for p in ib.keys() if pg.startswith(p))
# get dict relating to parameter prefix
ib_k = ib[p][ks[0]]
else:
p = 'general_zn'
ib_k = ib[p][ks[0]]
else:
ib_k = ib[ks[0]]
if len(ks) != 1: # TODO
#self.logger.lraise("something is wrong in fac calcs for par group {0}".format(pg))
self.logger.warn("multiple k values for {0},forming composite zone array...".format(pg))
ib_k = np.zeros((self.m.nrow,self.m.ncol))
for k in ks:
t = ib[k].copy()
t[t<1] = 0
ib_k[t>0] = t[t>0]
k = int(ks[0])
kattr_id = "{}_{}".format(k, p)
kp_id = "{}_{}".format(k, pg)
if kp_id not in pp_dfs_k.keys():
self.log("calculating factors for p={0}, k={1}".format(pg, k))
fac_file = os.path.join(self.m.model_ws, "pp_k{0}.fac".format(kattr_id))
            var_file = fac_file.replace(".fac", ".var.dat")
pp_df_k = pp_df.loc[pp_df.pargp == pg]
if kattr_id not in pp_processed:
self.logger.statement("saving krige variance file:{0}"
.format(var_file))
self.logger.statement("saving krige factors file:{0}"
.format(fac_file))
ok_pp = pyemu.geostats.OrdinaryKrige(self.pp_geostruct, pp_df_k)
ok_pp.calc_factors_grid(self.m.sr, var_filename=var_file, zone_array=ib_k)
ok_pp.to_grid_factors_file(fac_file)
pp_processed.add(kattr_id)
fac_files[kp_id] = fac_file
self.log("calculating factors for p={0}, k={1}".format(pg, k))
pp_dfs_k[kp_id] = pp_df_k
for kp_id, fac_file in fac_files.items():
k = int(kp_id.split('_')[0])
pp_prefix = kp_id.split('_', 1)[-1]
#pp_files = pp_df.pp_filename.unique()
fac_file = os.path.split(fac_file)[-1]
# pp_prefixes = pp_dict[k]
# for pp_prefix in pp_prefixes:
self.log("processing pp_prefix:{0}".format(pp_prefix))
if pp_prefix not in pp_array_file.keys():
self.logger.lraise("{0} not in self.pp_array_file.keys()".
format(pp_prefix,','.
join(pp_array_file.keys())))
out_file = os.path.join(self.arr_mlt,os.path.split(pp_array_file[pp_prefix])[-1])
pp_files = pp_df.loc[pp_df.pp_filename.apply(lambda x: pp_prefix in x),"pp_filename"]
if pp_files.unique().shape[0] != 1:
self.logger.lraise("wrong number of pp_files found:{0}".format(','.join(pp_files)))
pp_file = os.path.split(pp_files.iloc[0])[-1]
pp_df.loc[pp_df.pargp==pp_prefix,"fac_file"] = fac_file
pp_df.loc[pp_df.pargp==pp_prefix,"pp_file"] = pp_file
pp_df.loc[pp_df.pargp==pp_prefix,"out_file"] = out_file
pp_df.loc[:,"pargp"] = pp_df.pargp.apply(lambda x: "pp_{0}".format(x))
out_files = mlt_df.loc[mlt_df.mlt_file.
apply(lambda x: x.endswith(self.pp_suffix)),"mlt_file"]
#mlt_df.loc[:,"fac_file"] = np.NaN
#mlt_df.loc[:,"pp_file"] = np.NaN
for out_file in out_files:
pp_df_pf = pp_df.loc[pp_df.out_file==out_file,:]
fac_files = pp_df_pf.fac_file
if fac_files.unique().shape[0] != 1:
self.logger.lraise("wrong number of fac files:{0}".format(str(fac_files.unique())))
fac_file = fac_files.iloc[0]
pp_files = pp_df_pf.pp_file
if pp_files.unique().shape[0] != 1:
self.logger.lraise("wrong number of pp files:{0}".format(str(pp_files.unique())))
pp_file = pp_files.iloc[0]
mlt_df.loc[mlt_df.mlt_file==out_file,"fac_file"] = fac_file
mlt_df.loc[mlt_df.mlt_file==out_file,"pp_file"] = pp_file
self.par_dfs[self.pp_suffix] = pp_df
    mlt_df.loc[mlt_df.suffix==self.pp_suffix,"tpl_file"] = np.NaN
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
"""Filter samples from biom table that have less than
minreads reads total
    Parameters
----------
table : biom.Table
the biom table to filter
minreads : int (optional)
the minimal number of reads in a sample in order to keep it
inplace : bool (optional)
if True, filter the biom table in place, if false create a new copy
Returns
-------
table : biom.Table
the filtered biom table
"""
logger = logging.getLogger(__name__)
logger.debug('filter_minreads_started. minreads=%d' % minreads)
samp_sum = table.sum(axis='sample')
samp_ids = table.ids(axis='sample')
bad_samples = samp_ids[samp_sum < minreads]
if len(bad_samples) > 0:
logger.warn('removed %d samples with reads per sample<%d'
% (len(bad_samples), minreads))
table = table.filter(bad_samples, axis='sample',
inplace=inplace, invert=True)
else:
logger.debug('all samples contain > %d reads' % minreads)
    return table
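# A hedged usage sketch, assuming the standard biom-format Table constructor
# biom.Table(data, observation_ids, sample_ids); sample read totals here are
# 15, 2 and 1, so only 's1' survives minreads=3.
import numpy as np
import biom

data = np.array([[5, 0, 1],
                 [10, 2, 0]])  # 2 observations x 3 samples
table = biom.Table(data, ['obs1', 'obs2'], ['s1', 's2', 's3'])
filtered = filter_minreads_samples_from_table(table, minreads=3, inplace=False)
print(filtered.ids(axis='sample'))  # ['s1']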
def get(self, sid):
"""
Constructs a ReservationContext
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.worker.reservation.ReservationContext
:rtype: twilio.rest.taskrouter.v1.workspace.worker.reservation.ReservationContext
"""
return ReservationContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
worker_sid=self._solution['worker_sid'],
sid=sid,
    )
def solveAndNotify(proto, exercise):
"""The user at the given AMP protocol has solved the given exercise.
This will log the solution and notify the user.
"""
exercise.solvedBy(proto.user)
proto.callRemote(ce.NotifySolved,
identifier=exercise.identifier,
                     title=exercise.title)
def make_type(typename, lineno, implicit=False):
""" Converts a typename identifier (e.g. 'float') to
its internal symbol table entry representation.
Creates a type usage symbol stored in a AST
E.g. DIM a As Integer
will access Integer type
"""
assert isinstance(typename, str)
if not SYMBOL_TABLE.check_is_declared(typename, lineno, 'type'):
return None
type_ = symbols.TYPEREF(SYMBOL_TABLE.get_entry(typename), lineno, implicit)
    return type_
def to_import(self):
# type: () -> ImportEndpoint
"""
Converts an EndpointDescription bean to an ImportEndpoint
:return: An ImportEndpoint bean
"""
# Properties
properties = self.get_properties()
# Framework UUID
fw_uid = self.get_framework_uuid()
# Endpoint name
try:
# From Pelix UID
name = properties[pelix.remote.PROP_ENDPOINT_NAME]
except KeyError:
# Generated
name = "{0}.{1}".format(fw_uid, self.get_service_id())
# Configuration / kind
configurations = self.get_configuration_types()
# Interfaces
specifications = self.get_interfaces()
return ImportEndpoint(
self.get_id(),
fw_uid,
configurations,
name,
specifications,
properties,
    )
def revnet_cifar_base():
"""Tiny hparams suitable for CIFAR/etc."""
hparams = revnet_base()
hparams.num_channels_init_block = 32
hparams.first_batch_norm = [False, True, True]
hparams.init_stride = 1
hparams.init_kernel_size = 3
hparams.init_maxpool = False
hparams.strides = [1, 2, 2]
hparams.batch_size = 128
hparams.weight_decay = 1e-4
hparams.learning_rate = 0.1
hparams.learning_rate_cosine_cycle_steps = 5000
    return hparams
def api_representation(self, content_type):
""" Returns the JSON representation of this message required for making requests to the API.
Args:
content_type (str): Either 'HTML' or 'Text'
"""
payload = dict(Subject=self.subject, Body=dict(ContentType=content_type, Content=self.body))
if self.sender is not None:
payload.update(From=self.sender.api_representation())
# A list of strings can also be provided for convenience. If provided, convert them into Contacts
if any(isinstance(item, str) for item in self.to):
self.to = [Contact(email=email) for email in self.to]
# Turn each contact into the JSON needed for the Outlook API
recipients = [contact.api_representation() for contact in self.to]
payload.update(ToRecipients=recipients)
# Conduct the same process for CC and BCC if needed
if self.cc:
if any(isinstance(email, str) for email in self.cc):
self.cc = [Contact(email) for email in self.cc]
cc_recipients = [contact.api_representation() for contact in self.cc]
payload.update(CcRecipients=cc_recipients)
if self.bcc:
if any(isinstance(email, str) for email in self.bcc):
self.bcc = [Contact(email) for email in self.bcc]
bcc_recipients = [contact.api_representation() for contact in self.bcc]
payload.update(BccRecipients=bcc_recipients)
if self._attachments:
payload.update(Attachments=[attachment.api_representation() for attachment in self._attachments])
payload.update(Importance=str(self.importance))
    return dict(Message=payload)
def get_albums_for_artist(self, artist, full_album_art_uri=False):
"""Get an artist's albums.
Args:
artist (str): an artist's name.
full_album_art_uri: whether the album art URI should be
absolute (i.e. including the IP address). Default `False`.
Returns:
A `SearchResult` instance.
"""
subcategories = [artist]
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories,
complete_result=True)
reduced = [item for item in result if item.__class__ == DidlMusicAlbum]
# It is necessary to update the list of items in two places, due to
# a bug in SearchResult
result[:] = reduced
result._metadata.update({
'item_list': reduced,
'search_type': 'albums_for_artist',
'number_returned': len(reduced),
'total_matches': len(reduced)
})
    return result
def disable_hostgroup_svc_checks(self, hostgroup):
"""Disable service checks for a hostgroup
Format of the line that triggers function call::
DISABLE_HOSTGROUP_SVC_CHECKS;<hostgroup_name>
:param hostgroup: hostgroup to disable
:type hostgroup: alignak.objects.hostgroup.Hostgroup
:return: None
"""
for host_id in hostgroup.get_hosts():
if host_id in self.daemon.hosts:
for service_id in self.daemon.hosts[host_id].services:
if service_id in self.daemon.services:
                    self.disable_svc_check(self.daemon.services[service_id])
def discover_engines(self, executor=None):
"""Discover configured engines.
:param executor: Optional executor module override
"""
if executor is None:
executor = getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local')
self.executor = self.load_executor(executor)
logger.info(
__("Loaded '{}' executor.", str(self.executor.__class__.__module__).replace('.prepare', ''))
)
expression_engines = getattr(settings, 'FLOW_EXPRESSION_ENGINES', ['resolwe.flow.expression_engines.jinja'])
self.expression_engines = self.load_expression_engines(expression_engines)
logger.info(__(
"Found {} expression engines: {}", len(self.expression_engines), ', '.join(self.expression_engines.keys())
))
execution_engines = getattr(settings, 'FLOW_EXECUTION_ENGINES', ['resolwe.flow.execution_engines.bash'])
self.execution_engines = self.load_execution_engines(execution_engines)
logger.info(__(
"Found {} execution engines: {}", len(self.execution_engines), ', '.join(self.execution_engines.keys())
    ))
def _sign(private_key, data, hash_algorithm, rsa_pss_padding=False):
"""
Generates an RSA, DSA or ECDSA signature
:param private_key:
The PrivateKey to generate the signature with
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha224", "sha256", "sha384" or "sha512"
:param rsa_pss_padding:
If the private_key is an RSA key, this enables PSS padding
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the signature
"""
if not isinstance(private_key, PrivateKey):
raise TypeError(pretty_message(
'''
private_key must be an instance of PrivateKey, not %s
''',
type_name(private_key)
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
valid_hash_algorithms = set(['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'])
if private_key.algorithm == 'rsa' and not rsa_pss_padding:
valid_hash_algorithms |= set(['raw'])
if hash_algorithm not in valid_hash_algorithms:
valid_hash_algorithms_error = '"md5", "sha1", "sha224", "sha256", "sha384", "sha512"'
if private_key.algorithm == 'rsa' and not rsa_pss_padding:
valid_hash_algorithms_error += ', "raw"'
raise ValueError(pretty_message(
'''
hash_algorithm must be one of %s, not %s
''',
valid_hash_algorithms_error,
repr(hash_algorithm)
))
if private_key.algorithm != 'rsa' and rsa_pss_padding:
raise ValueError(pretty_message(
'''
PSS padding can only be used with RSA keys - the key provided is a
%s key
''',
private_key.algorithm.upper()
))
if private_key.algorithm == 'rsa' and hash_algorithm == 'raw':
if len(data) > private_key.byte_size - 11:
raise ValueError(pretty_message(
'''
data must be 11 bytes shorter than the key size when
hash_algorithm is "raw" - key size is %s bytes, but data is
%s bytes long
''',
private_key.byte_size,
len(data)
))
rsa = None
try:
rsa = libcrypto.EVP_PKEY_get1_RSA(private_key.evp_pkey)
if is_null(rsa):
handle_openssl_error(0)
buffer_size = libcrypto.EVP_PKEY_size(private_key.evp_pkey)
signature_buffer = buffer_from_bytes(buffer_size)
signature_length = libcrypto.RSA_private_encrypt(
len(data),
data,
signature_buffer,
rsa,
LibcryptoConst.RSA_PKCS1_PADDING
)
handle_openssl_error(signature_length)
return bytes_from_buffer(signature_buffer, signature_length)
finally:
if rsa:
libcrypto.RSA_free(rsa)
evp_md_ctx = None
rsa = None
dsa = None
dsa_sig = None
ec_key = None
ecdsa_sig = None
try:
if libcrypto_version_info < (1, 1):
evp_md_ctx = libcrypto.EVP_MD_CTX_create()
else:
evp_md_ctx = libcrypto.EVP_MD_CTX_new()
evp_md = {
'md5': libcrypto.EVP_md5,
'sha1': libcrypto.EVP_sha1,
'sha224': libcrypto.EVP_sha224,
'sha256': libcrypto.EVP_sha256,
'sha384': libcrypto.EVP_sha384,
'sha512': libcrypto.EVP_sha512
}[hash_algorithm]()
if libcrypto_version_info < (1,):
if private_key.algorithm == 'rsa' and rsa_pss_padding:
digest = getattr(hashlib, hash_algorithm)(data).digest()
rsa = libcrypto.EVP_PKEY_get1_RSA(private_key.evp_pkey)
if is_null(rsa):
handle_openssl_error(0)
buffer_size = libcrypto.EVP_PKEY_size(private_key.evp_pkey)
em_buffer = buffer_from_bytes(buffer_size)
res = libcrypto.RSA_padding_add_PKCS1_PSS(
rsa,
em_buffer,
digest,
evp_md,
LibcryptoConst.EVP_MD_CTX_FLAG_PSS_MDLEN
)
handle_openssl_error(res)
signature_buffer = buffer_from_bytes(buffer_size)
signature_length = libcrypto.RSA_private_encrypt(
buffer_size,
em_buffer,
signature_buffer,
rsa,
LibcryptoConst.RSA_NO_PADDING
)
handle_openssl_error(signature_length)
elif private_key.algorithm == 'rsa':
buffer_size = libcrypto.EVP_PKEY_size(private_key.evp_pkey)
signature_buffer = buffer_from_bytes(buffer_size)
signature_length = new(libcrypto, 'unsigned int *')
res = libcrypto.EVP_DigestInit_ex(evp_md_ctx, evp_md, null())
handle_openssl_error(res)
res = libcrypto.EVP_DigestUpdate(evp_md_ctx, data, len(data))
handle_openssl_error(res)
res = libcrypto.EVP_SignFinal(
evp_md_ctx,
signature_buffer,
signature_length,
private_key.evp_pkey
)
handle_openssl_error(res)
signature_length = deref(signature_length)
elif private_key.algorithm == 'dsa':
digest = getattr(hashlib, hash_algorithm)(data).digest()
dsa = libcrypto.EVP_PKEY_get1_DSA(private_key.evp_pkey)
if is_null(dsa):
handle_openssl_error(0)
dsa_sig = libcrypto.DSA_do_sign(digest, len(digest), dsa)
if is_null(dsa_sig):
handle_openssl_error(0)
buffer_size = libcrypto.i2d_DSA_SIG(dsa_sig, null())
signature_buffer = buffer_from_bytes(buffer_size)
signature_pointer = buffer_pointer(signature_buffer)
signature_length = libcrypto.i2d_DSA_SIG(dsa_sig, signature_pointer)
handle_openssl_error(signature_length)
elif private_key.algorithm == 'ec':
digest = getattr(hashlib, hash_algorithm)(data).digest()
ec_key = libcrypto.EVP_PKEY_get1_EC_KEY(private_key.evp_pkey)
if is_null(ec_key):
handle_openssl_error(0)
ecdsa_sig = libcrypto.ECDSA_do_sign(digest, len(digest), ec_key)
if is_null(ecdsa_sig):
handle_openssl_error(0)
buffer_size = libcrypto.i2d_ECDSA_SIG(ecdsa_sig, null())
signature_buffer = buffer_from_bytes(buffer_size)
signature_pointer = buffer_pointer(signature_buffer)
signature_length = libcrypto.i2d_ECDSA_SIG(ecdsa_sig, signature_pointer)
handle_openssl_error(signature_length)
else:
buffer_size = libcrypto.EVP_PKEY_size(private_key.evp_pkey)
signature_buffer = buffer_from_bytes(buffer_size)
signature_length = new(libcrypto, 'size_t *', buffer_size)
evp_pkey_ctx_pointer_pointer = new(libcrypto, 'EVP_PKEY_CTX **')
res = libcrypto.EVP_DigestSignInit(
evp_md_ctx,
evp_pkey_ctx_pointer_pointer,
evp_md,
null(),
private_key.evp_pkey
)
handle_openssl_error(res)
evp_pkey_ctx_pointer = unwrap(evp_pkey_ctx_pointer_pointer)
if rsa_pss_padding:
# Enable PSS padding
res = libcrypto.EVP_PKEY_CTX_ctrl(
evp_pkey_ctx_pointer,
LibcryptoConst.EVP_PKEY_RSA,
-1, # All operations
LibcryptoConst.EVP_PKEY_CTRL_RSA_PADDING,
LibcryptoConst.RSA_PKCS1_PSS_PADDING,
null()
)
handle_openssl_error(res)
# Use the hash algorithm output length as the salt length
res = libcrypto.EVP_PKEY_CTX_ctrl(
evp_pkey_ctx_pointer,
LibcryptoConst.EVP_PKEY_RSA,
LibcryptoConst.EVP_PKEY_OP_SIGN | LibcryptoConst.EVP_PKEY_OP_VERIFY,
LibcryptoConst.EVP_PKEY_CTRL_RSA_PSS_SALTLEN,
-1,
null()
)
handle_openssl_error(res)
res = libcrypto.EVP_DigestUpdate(evp_md_ctx, data, len(data))
handle_openssl_error(res)
res = libcrypto.EVP_DigestSignFinal(evp_md_ctx, signature_buffer, signature_length)
handle_openssl_error(res)
signature_length = deref(signature_length)
return bytes_from_buffer(signature_buffer, signature_length)
finally:
if evp_md_ctx:
if libcrypto_version_info < (1, 1):
libcrypto.EVP_MD_CTX_destroy(evp_md_ctx)
else:
libcrypto.EVP_MD_CTX_free(evp_md_ctx)
if rsa:
libcrypto.RSA_free(rsa)
if dsa:
libcrypto.DSA_free(dsa)
if dsa_sig:
libcrypto.DSA_SIG_free(dsa_sig)
if ec_key:
libcrypto.EC_KEY_free(ec_key)
if ecdsa_sig:
            libcrypto.ECDSA_SIG_free(ecdsa_sig)
def get_data_home(data_home=None):
"""
Return the path of the revrand data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'revrand_data'
in the user home folder.
Alternatively, it can be set by the 'REVRAND_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
data_home_default = Path(__file__).ancestor(3).child('demos',
'_revrand_data')
if data_home is None:
data_home = os.environ.get('REVRAND_DATA', data_home_default)
if not os.path.exists(data_home):
os.makedirs(data_home)
    return data_home
def insert_many(objects, using="default"):
"""Insert list of Django objects in one SQL query. Objects must be
of the same Django model. Note that save is not called and signals
on the model are not raised.
Mostly from: http://people.iola.dk/olau/python/bulkops.py
"""
if not objects:
return
import django.db.models
from django.db import connections
from django.db import transaction
con = connections[using]
model = objects[0].__class__
fields = [f for f in model._meta.fields
if not isinstance(f, django.db.models.AutoField)]
parameters = []
for o in objects:
params = tuple(f.get_db_prep_save(f.pre_save(o, True), connection=con)
for f in fields)
parameters.append(params)
table = model._meta.db_table
column_names = ",".join(con.ops.quote_name(f.column) for f in fields)
placeholders = ",".join(("%s",) * len(fields))
con.cursor().executemany("insert into %s (%s) values (%s)"
% (table, column_names, placeholders), parameters)
    transaction.commit_unless_managed(using=using)
def exists(self, filename):
"""Report whether a file exists on the distribution point.
Determines file type by extension.
Args:
filename: Filename you wish to check. (No path! e.g.:
"AdobeFlashPlayer-14.0.0.176.pkg")
"""
if is_package(filename):
filepath = os.path.join(self.connection["mount_point"],
"Packages", filename)
else:
filepath = os.path.join(self.connection["mount_point"],
"Scripts", filename)
return os.path.exists(filepath) | Report whether a file exists on the distribution point.
Determines file type by extension.
Args:
filename: Filename you wish to check. (No path! e.g.:
"AdobeFlashPlayer-14.0.0.176.pkg") |
def create(self, validated_data):
"""
Create the video and its nested resources.
"""
courses = validated_data.pop("courses", [])
encoded_videos = validated_data.pop("encoded_videos", [])
video = Video.objects.create(**validated_data)
EncodedVideo.objects.bulk_create(
EncodedVideo(video=video, **video_data)
for video_data in encoded_videos
)
# The CourseSerializer will already have converted the course data
# to CourseVideo models, so we can just set the video and save.
# Also create VideoImage objects if an image filename is present
for course_video, image_name in courses:
course_video.video = video
course_video.save()
if image_name:
VideoImage.create_or_update(course_video, image_name)
return video | Create the video and its nested resources. |
def array(self):
"""
return the underlying numpy array
"""
return np.geomspace(self.start, self.stop, self.num, endpoint=self.endpoint) | return the underlying numpy array
def cal_k_vinet_from_v(v, v0, k0, k0p):
"""
calculate bulk modulus in GPa
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:return: bulk modulus at high pressure in GPa
"""
x = v / v0
y = np.power(x, 1. / 3.)
eta = 1.5 * (k0p - 1.)
k = k0 * np.power(y, -2.) * (1. + (eta * y + 1.) * (1. - y)) * \
unp.exp((1. - y) * eta)
return k | calculate bulk modulus in GPa
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:return: bulk modulus at high pressure in GPa |
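A minimal standalone check of the Vinet expression above, using plain numpy instead of uncertainties.unumpy (so no error propagation); the numerical inputs are made-up, roughly MgO-like values and are not taken from the source.

import numpy as np

def cal_k_vinet_sketch(v, v0, k0, k0p):
    # same algebra as cal_k_vinet_from_v, but with np.exp instead of unp.exp
    x = v / v0
    y = np.power(x, 1. / 3.)
    eta = 1.5 * (k0p - 1.)
    return k0 * np.power(y, -2.) * (1. + (eta * y + 1.) * (1. - y)) * np.exp((1. - y) * eta)

print(cal_k_vinet_sketch(v=74.7, v0=74.7, k0=160.0, k0p=4.0))  # 160.0: reduces to k0 at v = v0
print(cal_k_vinet_sketch(v=70.0, v0=74.7, k0=160.0, k0p=4.0))  # > 160: stiffens under compression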
def _CreatePlacemark(self, parent, name, style_id=None, visible=True,
description=None):
"""Create a KML Placemark element.
Args:
parent: The parent ElementTree.Element instance.
name: The placemark name as a string.
style_id: If not None, the id of a style to use for the placemark.
visible: Whether the placemark is initially visible or not.
description: A description string or None.
Returns:
The placemark ElementTree.Element instance.
"""
placemark = ET.SubElement(parent, 'Placemark')
placemark_name = ET.SubElement(placemark, 'name')
placemark_name.text = name
if description is not None:
desc_tag = ET.SubElement(placemark, 'description')
desc_tag.text = description
if style_id is not None:
styleurl = ET.SubElement(placemark, 'styleUrl')
styleurl.text = '#%s' % style_id
if not visible:
visibility = ET.SubElement(placemark, 'visibility')
visibility.text = '0'
return placemark | Create a KML Placemark element.
Args:
parent: The parent ElementTree.Element instance.
name: The placemark name as a string.
style_id: If not None, the id of a style to use for the placemark.
visible: Whether the placemark is initially visible or not.
description: A description string or None.
Returns:
The placemark ElementTree.Element instance. |
def optimize(lattice,
positions,
numbers,
displacements,
forces,
alm_options=None,
p2s_map=None,
p2p_map=None,
log_level=0):
"""Calculate force constants
lattice : array_like
Basis vectors. a, b, c are given as column vectors.
shape=(3, 3), dtype='double'
positions : array_like
Fractional coordinates of atomic points.
shape=(num_atoms, 3), dtype='double'
numbers : array_like
Atomic numbers.
shape=(num_atoms,), dtype='intc'
displacements : array_like
Atomic displacement patterns in supercells in Cartesian.
dtype='double', shape=(supercells, num_atoms, 3)
forces : array_like
Forces in supercells.
dtype='double', shape=(supercells, num_atoms, 3)
alm_options : dict, optional
Default is None.
List of keys
cutoff_distance : float
solver : str
Either 'SimplicialLDLT' or 'dense'. Default is
'SimplicialLDLT'.
"""
from alm import ALM
with ALM(lattice, positions, numbers) as alm:
natom = len(numbers)
alm.set_verbosity(log_level)
nkd = len(np.unique(numbers))
if 'cutoff_distance' not in alm_options:
rcs = -np.ones((2, nkd, nkd), dtype='double')
elif type(alm_options['cutoff_distance']) is float:
rcs = np.ones((2, nkd, nkd), dtype='double')
rcs[0] *= -1
rcs[1] *= alm_options['cutoff_distance']
alm.define(2, rcs)
alm.set_displacement_and_force(displacements, forces)
if 'solver' in alm_options:
solver = alm_options['solver']
else:
solver = 'SimplicialLDLT'
info = alm.optimize(solver=solver)
fc2 = extract_fc2_from_alm(alm,
natom,
atom_list=p2s_map,
p2s_map=p2s_map,
p2p_map=p2p_map)
fc3 = _extract_fc3_from_alm(alm,
natom,
p2s_map=p2s_map,
p2p_map=p2p_map)
return fc2, fc3 | Calculate force constants
lattice : array_like
Basis vectors. a, b, c are given as column vectors.
shape=(3, 3), dtype='double'
positions : array_like
Fractional coordinates of atomic points.
shape=(num_atoms, 3), dtype='double'
numbers : array_like
Atomic numbers.
shape=(num_atoms,), dtype='intc'
displacements : array_like
Atomic displacement patterns in supercells in Cartesian.
dtype='double', shape=(supercells, num_atoms, 3)
forces : array_like
Forces in supercells.
dtype='double', shape=(supercells, num_atoms, 3)
alm_options : dict, optional
Default is None.
List of keys
cutoff_distance : float
solver : str
Either 'SimplicialLDLT' or 'dense'. Default is
'SimplicialLDLT'. |
def get_or_create_hosted_zone(client, zone_name):
"""Get the Id of an existing zone, or create it.
Args:
client (:class:`botocore.client.Route53`): The connection used to
interact with Route53's API.
zone_name (string): The name of the DNS hosted zone to create.
Returns:
string: The Id of the Hosted Zone.
"""
zone_id = get_hosted_zone_by_name(client, zone_name)
if zone_id:
return zone_id
logger.debug("Zone %s does not exist, creating.", zone_name)
reference = uuid.uuid4().hex
response = client.create_hosted_zone(Name=zone_name,
CallerReference=reference)
return parse_zone_id(response["HostedZone"]["Id"]) | Get the Id of an existing zone, or create it.
Args:
client (:class:`botocore.client.Route53`): The connection used to
interact with Route53's API.
zone_name (string): The name of the DNS hosted zone to create.
Returns:
string: The Id of the Hosted Zone. |
def modify_server(self, UUID, **kwargs):
"""
modify_server allows updating the server's updateable_fields.
Note: Server's IP-addresses and Storages are managed by their own add/remove methods.
"""
body = dict()
body['server'] = {}
for arg in kwargs:
if arg not in Server.updateable_fields:
raise Exception('{0} is not an updateable field'.format(arg))
body['server'][arg] = kwargs[arg]
res = self.request('PUT', '/server/{0}'.format(UUID), body)
server = res['server']
# Populate subobjects
IPAddresses = IPAddress._create_ip_address_objs(server.pop('ip_addresses'),
cloud_manager=self)
storages = Storage._create_storage_objs(server.pop('storage_devices'),
cloud_manager=self)
return Server(
server,
ip_addresses=IPAddresses,
storage_devices=storages,
populated=True,
cloud_manager=self
) | modify_server allows updating the server's updateable_fields.
Note: Server's IP-addresses and Storages are managed by their own add/remove methods. |
def _get_generator(parser, extract, keep, check_maf):
"""Generates the data (with extract markers and keep, if required."""
if extract is not None:
parser = Extractor(parser, names=extract)
for data in parser.iter_genotypes():
data.genotypes = data.genotypes[keep]
# Checking the MAF, if required
if check_maf:
data.code_minor()
yield data | Generates the data (with extract markers and keep, if required).
def _process_reservations(self, reservations):
"""
Given a dict with the structure of a response from boto3.ec2.describe_instances(...),
find the public/private ips.
:param reservations:
:return:
"""
reservations = reservations['Reservations']
private_ip_addresses = []
private_hostnames = []
public_ips = []
public_hostnames = []
for reservation in reservations:
for instance in reservation['Instances']:
private_ip_addresses.append(instance['PrivateIpAddress'])
private_hostnames.append(instance['PrivateDnsName'])
if 'PublicIpAddress' in instance:
public_ips.append(instance['PublicIpAddress'])
elif not self.remove_nones:
public_ips.append(None)
if ('PublicDnsName' in instance) and (not self.remove_nones):
public_hostnames.append(instance['PublicDnsName'])
elif not self.remove_nones:
public_hostnames.append(None)
return {
'private': {
'ips': private_ip_addresses,
'hostnames': private_hostnames
},
'public': {
'ips': public_ips,
'hostnames': public_hostnames
},
'reservations': reservations
} | Given a dict with the structure of a response from boto3.ec2.describe_instances(...),
find the public/private ips.
:param reservations:
:return: |
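A self-contained illustration of the traversal performed above; the payload shape mirrors boto3's ec2.describe_instances() response, but every value here is made up, and this sketch always keeps None placeholders (the class above can also drop them via remove_nones).

reservations = {
    'Reservations': [
        {'Instances': [
            {'PrivateIpAddress': '10.0.0.5', 'PrivateDnsName': 'ip-10-0-0-5.internal',
             'PublicIpAddress': '203.0.113.7', 'PublicDnsName': 'ec2-203-0-113-7.example.com'},
            {'PrivateIpAddress': '10.0.0.6', 'PrivateDnsName': 'ip-10-0-0-6.internal'},
        ]}
    ]
}

private_ips, public_ips = [], []
for reservation in reservations['Reservations']:
    for instance in reservation['Instances']:
        private_ips.append(instance['PrivateIpAddress'])
        # instances without a public address simply contribute None here
        public_ips.append(instance.get('PublicIpAddress'))

print(private_ips)   # ['10.0.0.5', '10.0.0.6']
print(public_ips)    # ['203.0.113.7', None]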
def xread(self, streams, count=None, block=None):
"""
Block and monitor multiple streams for new data.
streams: a dict of stream names to stream IDs, where
IDs indicate the last ID already seen.
count: if set, only return this many items, beginning with the
earliest available.
block: number of milliseconds to wait, if nothing already present.
"""
pieces = []
if block is not None:
if not isinstance(block, (int, long)) or block < 0:
raise DataError('XREAD block must be a non-negative integer')
pieces.append(Token.get_token('BLOCK'))
pieces.append(str(block))
if count is not None:
if not isinstance(count, (int, long)) or count < 1:
raise DataError('XREAD count must be a positive integer')
pieces.append(Token.get_token('COUNT'))
pieces.append(str(count))
if not isinstance(streams, dict) or len(streams) == 0:
raise DataError('XREAD streams must be a non empty dict')
pieces.append(Token.get_token('STREAMS'))
keys, values = izip(*iteritems(streams))
pieces.extend(keys)
pieces.extend(values)
return self.execute_command('XREAD', *pieces) | Block and monitor multiple streams for new data.
streams: a dict of stream names to stream IDs, where
IDs indicate the last ID already seen.
count: if set, only return this many items, beginning with the
earliest available.
block: number of milliseconds to wait, if nothing already present. |
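For reference, a small sketch of the argument list the method assembles before calling execute_command('XREAD', *pieces); the stream names and IDs are invented.

streams = {'mystream': '$', 'other': '0-0'}
block, count = 5000, 10

pieces = ['BLOCK', str(block), 'COUNT', str(count), 'STREAMS']
pieces += list(streams.keys()) + list(streams.values())   # all keys first, then all IDs
print(pieces)
# ['BLOCK', '5000', 'COUNT', '10', 'STREAMS', 'mystream', 'other', '$', '0-0']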
def get_nn_shell_info(self, structure, site_idx, shell):
"""Get a certain nearest neighbor shell for a certain site.
Determines all non-backtracking paths through the neighbor network
computed by `get_nn_info`. The weight is determined by multiplying
the weight of the neighbor at each hop through the network. For
example, a 2nd-nearest-neighbor that has a weight of 1 from its
1st-nearest-neighbor and weight 0.5 from the original site will
be assigned a weight of 0.5.
As this calculation may involve computing the nearest neighbors of
atoms multiple times, the calculation starts by computing all of the
neighbor info and then calling `_get_nn_shell_info`. If you are likely
to call this method for more than one site, consider calling `get_all_nn`
first and then calling this protected method yourself.
Args:
structure (Molecule): Input structure
site_idx (int): index of site for which to determine neighbor
information.
shell (int): Which neighbor shell to retrieve (1 == 1st NN shell)
Returns:
list of dictionaries. Each entry in the list is information about
a certain neighbor in the structure, in the same format as
`get_nn_info`.
"""
all_nn_info = self.get_all_nn_info(structure)
sites = self._get_nn_shell_info(structure, all_nn_info, site_idx, shell)
# Update the site positions
# Did not do this during NN options because that can be slower
output = []
for info in sites:
orig_site = structure[info['site_index']]
info['site'] = Site(orig_site.species,
orig_site._coords,
properties=orig_site.properties)
output.append(info)
return output | Get a certain nearest neighbor shell for a certain site.
Determines all non-backtracking paths through the neighbor network
computed by `get_nn_info`. The weight is determined by multiplying
the weight of the neighbor at each hop through the network. For
example, a 2nd-nearest-neighbor that has a weight of 1 from its
1st-nearest-neighbor and weight 0.5 from the original site will
be assigned a weight of 0.5.
As this calculation may involve computing the nearest neighbors of
atoms multiple times, the calculation starts by computing all of the
neighbor info and then calling `_get_nn_shell_info`. If you are likely
to call this method for more than one site, consider calling `get_all_nn`
first and then calling this protected method yourself.
Args:
structure (Molecule): Input structure
site_idx (int): index of site for which to determine neighbor
information.
shell (int): Which neighbor shell to retrieve (1 == 1st NN shell)
Returns:
list of dictionaries. Each entry in the list is information about
a certain neighbor in the structure, in the same format as
`get_nn_info`. |
def update(self, time):
""" Update acceleration. Accounts for the importance and
priority (order) of multiple behaviors. """
# .... I feel this stuff could be done a lot better.
total_acceleration = Vector.null()
max_jerk = self.max_acceleration
for behavior in self.behaviors:
acceleration, importance = behavior.update()
weighted_acceleration = acceleration * importance
"""
if max_jerk >= weighted_acceleration.magnitude:
max_jerk -= weighted_acceleration.magnitude
total_acceleration += weighted_acceleration
elif max_jerk > 0 and max_jerk < weighted_acceleration.magnitude:
total_acceleration += weighted_acceleration.normal * max_jerk
break
else:
break """
total_acceleration += weighted_acceleration
self.acceleration = total_acceleration
# Update position and velocity.
Sprite.update(self, time)
# Update facing direction.
if self.velocity.magnitude > 0.0:
self.facing = self.velocity.normal | Update acceleration. Accounts for the importance and
priority (order) of multiple behaviors. |
def subscribe(self, handler, topic=None, options=None):
"""Subscribe to a topic for receiving events.
Replace :meth:`autobahn.wamp.interface.IApplicationSession.subscribe`
"""
def proxy_handler(*args, **kwargs):
return self._callbacks_runner.put(partial(handler, *args, **kwargs))
return self._async_session.subscribe(proxy_handler, topic=topic, options=options) | Subscribe to a topic for receiving events.
Replace :meth:`autobahn.wamp.interface.IApplicationSession.subscribe` |
def join_room(self, room_name):
""" Connects to a given room
If it does not exist it is created"""
logging.debug('Joining room {ro}'.format(ro=room_name))
for room in self.rooms:
if room.name == room_name:
room.add_user(self)
self._rooms[room_name] = room
room.welcome(self)
break
else:
room = Room(room_name)
self.rooms.append(room)
self._rooms[room_name] = room
room.add_user(self) | Connects to a given room
If it does not exist it is created |
def get_template_dirs():
"""Return a set of all template directories."""
temp_glob = rel_to_cwd('templates', '**', 'templates', 'config.yaml')
temp_groups = glob(temp_glob)
temp_groups = [get_parent_dir(path, 2) for path in temp_groups]
return set(temp_groups) | Return a set of all template directories. |
def pass_outflow_v1(self):
"""Update the outlet link sequence |dam_outlets.Q|."""
flu = self.sequences.fluxes.fastaccess
out = self.sequences.outlets.fastaccess
out.q[0] += flu.outflow | Update the outlet link sequence |dam_outlets.Q|. |
def replace_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified HorizontalPodAutoscaler
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_horizontal_pod_autoscaler_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the HorizontalPodAutoscaler (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V2beta1HorizontalPodAutoscaler body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V2beta1HorizontalPodAutoscaler
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs)
return data | replace status of the specified HorizontalPodAutoscaler
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_horizontal_pod_autoscaler_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the HorizontalPodAutoscaler (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V2beta1HorizontalPodAutoscaler body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V2beta1HorizontalPodAutoscaler
If the method is called asynchronously,
returns the request thread. |
def get_project(self, project_id, include_capabilities=None, include_history=None):
"""GetProject.
Get project with the specified id or name, optionally including capabilities.
:param str project_id:
:param bool include_capabilities: Include capabilities (such as source control) in the team project result (default: false).
:param bool include_history: Search within renamed projects (that had such name in the past).
:rtype: :class:`<TeamProject> <azure.devops.v5_0.core.models.TeamProject>`
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
query_parameters = {}
if include_capabilities is not None:
query_parameters['includeCapabilities'] = self._serialize.query('include_capabilities', include_capabilities, 'bool')
if include_history is not None:
query_parameters['includeHistory'] = self._serialize.query('include_history', include_history, 'bool')
response = self._send(http_method='GET',
location_id='603fe2ac-9723-48b9-88ad-09305aa6c6e1',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('TeamProject', response) | GetProject.
Get project with the specified id or name, optionally including capabilities.
:param str project_id:
:param bool include_capabilities: Include capabilities (such as source control) in the team project result (default: false).
:param bool include_history: Search within renamed projects (that had such name in the past).
:rtype: :class:`<TeamProject> <azure.devops.v5_0.core.models.TeamProject>` |
def strip_cdata(text):
"""Removes all CDATA blocks from `text` if it contains them.
Note:
If the string contains escaped XML characters outside of a
CDATA block, they will be unescaped.
Args:
A string containing one or more CDATA blocks.
Returns:
An XML unescaped string with CDATA block qualifiers removed.
"""
if not is_cdata(text):
return text
xml = "<e>{0}</e>".format(text)
node = etree.fromstring(xml)
return node.text | Removes all CDATA blocks from `text` if it contains them.
Note:
If the string contains escaped XML characters outside of a
CDATA block, they will be unescaped.
Args:
A string containing one or more CDATA blocks.
Returns:
An XML unescaped string with CDATA block qualifiers removed. |
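is_cdata() is not shown in this excerpt; a self-contained sketch of the same unwrap trick, with a naive membership test standing in for it, behaves like this.

import xml.etree.ElementTree as ET

def strip_cdata_sketch(text):
    # naive stand-in for is_cdata()
    if "<![CDATA[" not in text:
        return text
    # wrap in a dummy element so the parser unwraps the CDATA block for us
    node = ET.fromstring("<e>{0}</e>".format(text))
    return node.text

print(strip_cdata_sketch("<![CDATA[<tag>&value</tag>]]>"))  # <tag>&value</tag>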
def _writeData(self, command, device, params=()):
"""
Write the data to the device.
:Parameters:
command : `int`
The command to write to the device.
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol.
params : `tuple`
Sequence of bytes to write.
:Exceptions:
* `SerialTimeoutException`
If the low level serial package times out.
* `SerialException`
IO error when the port is not open.
"""
sequence = []
if self._compact:
sequence.append(command | 0x80)
else:
sequence.append(self._BAUD_DETECT)
sequence.append(device)
sequence.append(command)
for param in params:
sequence.append(param)
if self._crc:
sequence.append(crc7(sequence))
self._serial.write(bytearray(sequence))
self._log and self._log.debug("Wrote byte sequence: %s",
[hex(num) for num in sequence]) | Write the data to the device.
:Parameters:
command : `int`
The command to write to the device.
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol.
params : `tuple`
Sequence of bytes to write.
:Exceptions:
* `SerialTimeoutException`
If the low level serial package times out.
* `SerialException`
IO error when the port is not open. |
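A rough illustration of the two wire formats built above, with the CRC step omitted; 0xAA is the usual Pololu baud-detect byte, but the actual value of self._BAUD_DETECT is an assumption here, as are the command, device and parameter numbers.

command, device, params = 0x04, 0x0C, (0x20, 0x1F)

compact = [command | 0x80] + list(params)        # compact protocol: command with high bit set
pololu = [0xAA, device, command] + list(params)  # Pololu protocol: baud-detect, device ID, command

print([hex(b) for b in compact])  # ['0x84', '0x20', '0x1f']
print([hex(b) for b in pololu])   # ['0xaa', '0xc', '0x4', '0x20', '0x1f']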
def list(self, end_date=values.unset, friendly_name=values.unset,
minutes=values.unset, start_date=values.unset,
task_channel=values.unset, split_by_wait_time=values.unset, limit=None,
page_size=None):
"""
Lists TaskQueuesStatisticsInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param datetime end_date: Filter cumulative statistics by an end date.
:param unicode friendly_name: Filter the TaskQueue stats based on a TaskQueue's name
:param unicode minutes: Filter cumulative statistics by up to 'x' minutes in the past.
:param datetime start_date: Filter cumulative statistics by a start date.
:param unicode task_channel: Filter real-time and cumulative statistics by TaskChannel.
:param unicode split_by_wait_time: A comma separated values for viewing splits of tasks canceled and accepted above the given threshold in seconds.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.taskrouter.v1.workspace.task_queue.task_queues_statistics.TaskQueuesStatisticsInstance]
"""
return list(self.stream(
end_date=end_date,
friendly_name=friendly_name,
minutes=minutes,
start_date=start_date,
task_channel=task_channel,
split_by_wait_time=split_by_wait_time,
limit=limit,
page_size=page_size,
)) | Lists TaskQueuesStatisticsInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param datetime end_date: Filter cumulative statistics by an end date.
:param unicode friendly_name: Filter the TaskQueue stats based on a TaskQueue's name
:param unicode minutes: Filter cumulative statistics by up to 'x' minutes in the past.
:param datetime start_date: Filter cumulative statistics by a start date.
:param unicode task_channel: Filter real-time and cumulative statistics by TaskChannel.
:param unicode split_by_wait_time: A comma separated values for viewing splits of tasks canceled and accepted above the given threshold in seconds.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.taskrouter.v1.workspace.task_queue.task_queues_statistics.TaskQueuesStatisticsInstance] |
def dispatch(self, tree):
"""Dispatcher function, dispatching tree type T to method _T."""
# display omp directive in python dump
for omp in metadata.get(tree, openmp.OMPDirective):
deps = list()
for dep in omp.deps:
old_file = self.f
self.f = io.StringIO()
self.dispatch(dep)
deps.append(self.f.getvalue())
self.f = old_file
directive = omp.s.format(*deps)
self._Expr(ast.Expr(ast.Str(s=directive)))
if isinstance(tree, list):
for t in tree:
self.dispatch(t)
return
meth = getattr(self, "_" + tree.__class__.__name__)
meth(tree) | Dispatcher function, dispatching tree type T to method _T. |
def add_trits(left, right):
# type: (Sequence[int], Sequence[int]) -> List[int]
"""
Adds two sequences of trits together.
The result is a list of trits equal in length to the longer of the
two sequences.
.. note::
Overflow is possible.
For example, ``add_trits([1], [1])`` returns ``[-1]``.
"""
target_len = max(len(left), len(right))
res = [0] * target_len
left += [0] * (target_len - len(left))
right += [0] * (target_len - len(right))
carry = 0
for i in range(len(res)):
res[i], carry = _full_add_trits(left[i], right[i], carry)
return res | Adds two sequences of trits together.
The result is a list of trits equal in length to the longer of the
two sequences.
.. note::
Overflow is possible.
For example, ``add_trits([1], [1])`` returns ``[-1]``. |
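_full_add_trits() is not included in this excerpt; a minimal balanced-ternary full adder consistent with the behaviour described above could look like the following sketch.

def _full_add_trits_sketch(a, b, carry):
    total = a + b + carry             # in -3..3
    trit = ((total + 1) % 3) - 1      # fold back into {-1, 0, 1}
    return trit, (total - trit) // 3  # carry toward the next position

def add_trits_sketch(left, right):
    n = max(len(left), len(right))
    left = list(left) + [0] * (n - len(left))
    right = list(right) + [0] * (n - len(right))
    out, carry = [], 0
    for a, b in zip(left, right):
        trit, carry = _full_add_trits_sketch(a, b, carry)
        out.append(trit)
    return out

print(add_trits_sketch([1], [1]))      # [-1]  (overflow, as noted above)
print(add_trits_sketch([1, 0, 1], [1]))  # [-1, 1, 1]  (10 + 1 = 11, least-significant trit first)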
def npartitions(self):
"""
Get number of partitions (Spark only).
"""
if self.mode == 'spark':
return self.tordd().getNumPartitions()
else:
notsupported(self.mode) | Get number of partitions (Spark only). |
def select_best_url(self):
"""Select `best` url.
Since urls are pre-sorted w.r.t. their ping times, we simply return the first element
from the list. And we always return the same url unless we observe greater than max
allowed number of consecutive failures. In this case, we would return the next `best`
url, and append the previous best one to the end of list (essentially rotate to the left
by one element).
"""
best_url = self.parsed_urls[0]
try:
yield best_url
except Exception:
self.unsuccessful_calls[best_url] += 1
# Not thread-safe but pool used by cache is based on subprocesses, therefore no race.
if self.unsuccessful_calls[best_url] > self.max_failures:
self.parsed_urls.rotate(-1)
self.unsuccessful_calls[best_url] = 0
raise
else:
self.unsuccessful_calls[best_url] = 0 | Select `best` url.
Since urls are pre-sorted w.r.t. their ping times, we simply return the first element
from the list. And we always return the same url unless we observe greater than max
allowed number of consecutive failures. In this case, we would return the next `best`
url, and append the previous best one to the end of list (essentially rotate to the left
by one element). |
def send_like(self, *, user_id, times=1):
"""
Send a friend "like" (profile thumbs-up).
------------
:param int user_id: QQ number of the target user
:param int times: number of likes to send; at most 10 per friend per day
:return: None
:rtype: None
"""
return super().__getattr__('send_like') \
(user_id=user_id, times=times) | Send a friend "like" (profile thumbs-up).
------------
:param int user_id: QQ number of the target user
:param int times: number of likes to send; at most 10 per friend per day
:return: None
:rtype: None |
def config(self):
"""The envs for this app."""
return self._h._get_resource(
resource=('apps', self.name, 'config_vars'),
obj=ConfigVars, app=self
) | The envs for this app. |
def centralManager_didConnectPeripheral_(self, manager, peripheral):
"""Called when a device is connected."""
logger.debug('centralManager_didConnectPeripheral called')
# Setup peripheral delegate and kick off service discovery. For now just
# assume all services need to be discovered.
peripheral.setDelegate_(self)
peripheral.discoverServices_(None)
# Fire connected event for device.
device = device_list().get(peripheral)
if device is not None:
device._set_connected() | Called when a device is connected. |
def portfolio_prices(
symbols=("AAPL", "GLD", "GOOG", "$SPX", "XOM", "msft"),
start=datetime.datetime(2005, 1, 1),
end=datetime.datetime(2011, 12, 31), # data stops at 2013/1/1
normalize=True,
allocation=None,
price_type='actual_close',
):
"""Calculate the Sharpe Ratio and other performance metrics for a portfolio
Arguments:
symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
start (datetime): The date at the start of the period being analyzed.
end (datetime): The date at the end of the period being analyzed.
normalize (bool): Whether to normalize prices to 1 at the start of the time series.
allocation (list of float): The portion of the portfolio allocated to each equity.
"""
symbols = normalize_symbols(symbols)
start = util.normalize_date(start)
end = util.normalize_date(end)
if allocation is None:
allocation = [1. / len(symbols)] * len(symbols)
if len(allocation) < len(symbols):
allocation = list(allocation) + [1. / len(symbols)] * (len(symbols) - len(allocation))
total = np.sum(allocation)
allocation = np.array([(float(a) / total) for a in allocation])
timestamps = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
ls_keys = [price_type]
ldf_data = da.get_data(timestamps, symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
na_price = d_data[price_type].values
if normalize:
na_price /= na_price[0, :]
na_price *= allocation
return np.sum(na_price, axis=1) | Calculate the Sharpe Ratio and other performance metrics for a portfolio
Arguments:
symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
start (datetime): The date at the start of the period being analyzed.
end (datetime): The date at the end of the period being analyzed.
normalize (bool): Whether to normalize prices to 1 at the start of the time series.
allocation (list of float): The portion of the portfolio allocated to each equity. |
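A quick standalone check of the allocation handling above (with the np.sum fix applied): a short allocation list is padded with equal weights and then normalised to sum to one; the symbols and weights are arbitrary.

import numpy as np

symbols = ['AAPL', 'GLD', 'GOOG', 'XOM']
allocation = [2.0, 1.0]                      # only two weights supplied
allocation += [1. / len(symbols)] * (len(symbols) - len(allocation))
allocation = np.array(allocation) / np.sum(allocation)
print(allocation)   # [0.57142857 0.28571429 0.07142857 0.07142857]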
def run_as(user, domain, password, filename, logon_flag=1, work_dir="",
show_flag=Properties.SW_SHOWNORMAL):
"""
Runs an external program.
:param user: username The user name to use.
:param domain: The domain name to use.
:param password: The password to use.
:param logon_flag: 0 = do not load the user profile, 1 = (default) load
the user profile, 2 = use for net credentials only
:param filename: The name of the executable (EXE, BAT, COM, or PIF) to run.
:param work_dir: The working directory.
:param show_flag: The "show" flag of the executed program:
SW_HIDE = Hidden window
SW_MINIMIZE = Minimized window
SW_MAXIMIZE = Maximized window
:return:
"""
ret = AUTO_IT.AU3_RunAs(
LPCWSTR(user), LPCWSTR(domain), LPCWSTR(password), INT(logon_flag),
LPCWSTR(filename), LPCWSTR(work_dir), INT(show_flag)
)
return ret | Runs an external program.
:param user: username The user name to use.
:param domain: The domain name to use.
:param password: The password to use.
:param logon_flag: 0 = do not load the user profile, 1 = (default) load
the user profile, 2 = use for net credentials only
:param filename: The name of the executable (EXE, BAT, COM, or PIF) to run.
:param work_dir: The working directory.
:param show_flag: The "show" flag of the executed program:
SW_HIDE = Hidden window
SW_MINIMIZE = Minimized window
SW_MAXIMIZE = Maximized window
:return: |
def root(self, scope, names):
"""Find root of identifier, from scope
args:
scope (Scope): current scope
names (list): identifier name list (comma-separated identifiers)
returns:
list
"""
parent = scope.scopename
if parent:
parent = parent[-1]
if parent.parsed:
parsed_names = []
for name in names:
ampersand_count = name.count('&')
if ampersand_count:
filtered_parts = []
for part in parent.parsed:
if part and part[0] not in self._subp:
filtered_parts.append(part)
permutations = list(
utility.permutations_with_replacement(
filtered_parts, ampersand_count))
for permutation in permutations:
parsed = []
for name_part in name:
if name_part == "&":
parent_part = permutation.pop(0)
if parsed and parsed[-1].endswith(']'):
parsed.extend(' ')
if parent_part[-1] == ' ':
parent_part.pop()
parsed.extend(parent_part)
else:
parsed.append(name_part)
parsed_names.append(parsed)
else:
# NOTE(saschpe): Maybe this code can be expressed with permutations too?
for part in parent.parsed:
if part and part[0] not in self._subp:
parsed = []
if name[0] == "@media":
parsed.extend(name)
else:
parsed.extend(part)
if part[-1] != ' ':
parsed.append(' ')
parsed.extend(name)
parsed_names.append(parsed)
else:
parsed_names.append(name)
return parsed_names
return names | Find root of identifier, from scope
args:
scope (Scope): current scope
names (list): identifier name list (comma-separated identifiers)
returns:
list |
def update_scheme(current, target):
"""
Take the scheme from the current URL and applies it to the
target URL if the target URL startswith // or is missing a scheme
:param current: current URL
:param target: target URL
:return: target URL with the current URLs scheme
"""
target_p = urlparse(target)
if not target_p.scheme and target_p.netloc:
return "{0}:{1}".format(urlparse(current).scheme,
urlunparse(target_p))
elif not target_p.scheme and not target_p.netloc:
return "{0}://{1}".format(urlparse(current).scheme,
urlunparse(target_p))
else:
return target | Take the scheme from the current URL and applies it to the
target URL if the target URL startswith // or is missing a scheme
:param current: current URL
:param target: target URL
:return: target URL with the current URLs scheme |
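The netloc check above hinges on how urlparse treats protocol-relative URLs; this small snippet shows the behaviour the first branch relies on (the host name is made up).

from urllib.parse import urlparse, urlunparse

target = urlparse('//cdn.example.com/ep1.mp3')
print(target.scheme, target.netloc)                   # scheme is '', netloc is 'cdn.example.com'
print('{0}:{1}'.format('https', urlunparse(target)))  # https://cdn.example.com/ep1.mp3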
def _encrypt(cipher, key, data, iv, padding):
"""
Encrypts plaintext
:param cipher:
A unicode string of "aes128", "aes192", "aes256", "des",
"tripledes_2key", "tripledes_3key", "rc2", "rc4"
:param key:
The encryption key - a byte string 5-32 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the ciphertext
"""
if not isinstance(key, byte_cls):
raise TypeError(pretty_message(
'''
key must be a byte string, not %s
''',
type_name(key)
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
if cipher != 'rc4' and not isinstance(iv, byte_cls):
raise TypeError(pretty_message(
'''
iv must be a byte string, not %s
''',
type_name(iv)
))
if cipher != 'rc4' and not padding:
raise ValueError('padding must be specified')
evp_cipher_ctx = None
try:
evp_cipher_ctx = libcrypto.EVP_CIPHER_CTX_new()
if is_null(evp_cipher_ctx):
handle_openssl_error(0)
evp_cipher, buffer_size = _setup_evp_encrypt_decrypt(cipher, data)
if iv is None:
iv = null()
if cipher in set(['rc2', 'rc4']):
res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), null(), null())
handle_openssl_error(res)
res = libcrypto.EVP_CIPHER_CTX_set_key_length(evp_cipher_ctx, len(key))
handle_openssl_error(res)
if cipher == 'rc2':
res = libcrypto.EVP_CIPHER_CTX_ctrl(
evp_cipher_ctx,
LibcryptoConst.EVP_CTRL_SET_RC2_KEY_BITS,
len(key) * 8,
null()
)
handle_openssl_error(res)
evp_cipher = null()
res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), key, iv)
handle_openssl_error(res)
if padding is not None:
res = libcrypto.EVP_CIPHER_CTX_set_padding(evp_cipher_ctx, int(padding))
handle_openssl_error(res)
buffer = buffer_from_bytes(buffer_size)
output_length = new(libcrypto, 'int *')
res = libcrypto.EVP_EncryptUpdate(evp_cipher_ctx, buffer, output_length, data, len(data))
handle_openssl_error(res)
output = bytes_from_buffer(buffer, deref(output_length))
res = libcrypto.EVP_EncryptFinal_ex(evp_cipher_ctx, buffer, output_length)
handle_openssl_error(res)
output += bytes_from_buffer(buffer, deref(output_length))
return output
finally:
if evp_cipher_ctx:
libcrypto.EVP_CIPHER_CTX_free(evp_cipher_ctx) | Encrypts plaintext
:param cipher:
A unicode string of "aes128", "aes192", "aes256", "des",
"tripledes_2key", "tripledes_3key", "rc2", "rc4"
:param key:
The encryption key - a byte string 5-32 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the ciphertext |
def rst_table(self, array):
"""Given an array, the function formats and returns and table in rST format."""
# Determine cell width for each column
cell_dict = {}
for i, row in enumerate(array):
for j, val in enumerate(row):
if j not in cell_dict:
cell_dict[j] = []
cell_dict[j].append(val)
for item in cell_dict:
cell_dict[item] = max([len(x) for x in cell_dict[item]]) + 1 # Contains adapted width for each column
# Format top line
num_cols = len(array[0])
form = '+'
for col in range(num_cols):
form += (cell_dict[col] + 1) * '-'
form += '+'
form += '\n'
# Format values
for i, row in enumerate(array):
form += '| '
for j, val in enumerate(row):
cell_width = cell_dict[j]
form += str(val) + (cell_width - len(val)) * ' ' + '| '
form = form.rstrip()
form += '\n'
# Separation lines
form += '+'
if i == 0:
sign = '='
else:
sign = '-'
for col in range(num_cols):
form += (cell_dict[col] + 1) * sign
form += '+'
form += '\n'
return form | Given an array, the function formats and returns a table in rST format.
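For a small input such as [['Name', 'Qty'], ['apple', '3']], and with the rstrip fix applied, the method above renders a grid table along these lines (column widths come from the longest cell plus one):

+-------+-----+
| Name  | Qty |
+=======+=====+
| apple | 3   |
+-------+-----+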
def ssn(self):
"""
Returns a 9-digit Dutch SSN called "burgerservicenummer (BSN)".
the Dutch "burgerservicenummer (BSN)" needs to pass the "11-proef",
which is a check digit approach; this function essentially reverses
the checksum steps to create a random valid BSN (which is 9 digits).
"""
# see http://nl.wikipedia.org/wiki/Burgerservicenummer (in Dutch)
def _checksum(digits):
factors = (9, 8, 7, 6, 5, 4, 3, 2, -1)
s = 0
for i in range(len(digits)):
s += digits[i] * factors[i]
return s
while True:
# create an array of first 8 elements initialized randomly
digits = self.generator.random.sample(range(10), 8)
# sum those 8 digits according to (part of) the "11-proef"
s = _checksum(digits)
# determine the last digit to make it qualify the test
digits.append((s % 11) % 10)
# repeat steps until it does qualify the test
if 0 == (_checksum(digits) % 11):
break
# build the resulting BSN
bsn = "".join([str(e) for e in digits])
# finally return our random but valid BSN
return bsn | Returns a 9-digit Dutch SSN called "burgerservicenummer (BSN)".
the Dutch "burgerservicenummer (BSN)" needs to pass the "11-proef",
which is a check digit approach; this function essentially reverses
the checksum steps to create a random valid BSN (which is 9 digits). |
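The forward direction of the "11-proef" that ssn() inverts can be checked standalone; the sample numbers below are synthetic, chosen only to exercise the weighted sum.

def is_valid_bsn(bsn):
    factors = (9, 8, 7, 6, 5, 4, 3, 2, -1)
    digits = [int(c) for c in bsn]
    # valid when the weighted digit sum is divisible by 11
    return len(digits) == 9 and sum(d * f for d, f in zip(digits, factors)) % 11 == 0

print(is_valid_bsn("111222333"))   # True  (9+8+7+12+10+8+9+6-3 == 66)
print(is_valid_bsn("111222334"))   # False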
def index(self, weighted=True, prune=False):
"""Return cube index measurement.
This function is deprecated. Use index_table from CubeSlice.
"""
warnings.warn(
"CrunchCube.index() is deprecated. Use CubeSlice.index_table().",
DeprecationWarning,
)
return Index.data(self, weighted, prune) | Return cube index measurement.
This function is deprecated. Use index_table from CubeSlice. |
def set_outgoing(self, value):
"""
Setter for 'outgoing' field.
:param value - a new value of 'outgoing' field. Must be a list of IDs (String type) of outgoing flows.
"""
if not isinstance(value, list):
raise TypeError("OutgoingList new value must be a list")
for element in value:
if not isinstance(element, str):
raise TypeError("OutgoingList elements in variable must be of String class")
self.__outgoing_list = value | Setter for 'outgoing' field.
:param value - a new value of 'outgoing' field. Must be a list of IDs (String type) of outgoing flows. |
def get(self, *args, **kwargs):
"""
Quick and dirty hack to fix change_view and delete_view; they use
self.queryset(request).get(...) to get the object they should work
with. Our modifications to the queryset when INCLUDE_ANCESTORS is
enabled make get() fail often with a MultipleObjectsReturned
exception.
"""
return self.model._default_manager.get(*args, **kwargs) | Quick and dirty hack to fix change_view and delete_view; they use
self.queryset(request).get(...) to get the object they should work
with. Our modifications to the queryset when INCLUDE_ANCESTORS is
enabled make get() fail often with a MultipleObjectsReturned
exception. |
def batch(self, client=None):
"""Return a batch to use as a context manager.
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current topic.
:rtype: :class:`Batch`
:returns: A batch to use as a context manager.
"""
client = self._require_client(client)
return Batch(self, client) | Return a batch to use as a context manager.
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current topic.
:rtype: :class:`Batch`
:returns: A batch to use as a context manager. |
def get_user_bookmarks(self, id, **data):
"""
GET /users/:id/bookmarks/
Gets all the user's saved events.
In order to update the saved events list, the user must unsave or save each event.
A user is authorized to only see his/her saved events.
"""
return self.get("/users/{0}/bookmarks/".format(id), data=data) | GET /users/:id/bookmarks/
Gets all the user's saved events.
In order to update the saved events list, the user must unsave or save each event.
A user is authorized to only see his/her saved events. |
def split_key(key):
"""Splits a node key."""
if key == KEY_SEP:
return ()
key_chunks = tuple(key.strip(KEY_SEP).split(KEY_SEP))
if key_chunks[0].startswith(KEY_SEP):
return (key_chunks[0][len(KEY_SEP):],) + key_chunks[1:]
else:
return key_chunks | Splits a node key. |
def load_airpassengers(as_series=False):
"""Monthly airline passengers.
The classic Box & Jenkins airline data. Monthly totals of international
airline passengers, 1949 to 1960.
Parameters
----------
as_series : bool, optional (default=False)
Whether to return a Pandas series. If False, will return a 1d
numpy array.
Returns
-------
rslt : array-like, shape=(n_samples,)
The time series vector.
Examples
--------
>>> from pmdarima.datasets import load_airpassengers
>>> load_airpassengers() # doctest: +SKIP
np.array([
112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118,
115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140,
145, 150, 178, 163, 172, 178, 199, 199, 184, 162, 146, 166,
171, 180, 193, 181, 183, 218, 230, 242, 209, 191, 172, 194,
196, 196, 236, 235, 229, 243, 264, 272, 237, 211, 180, 201,
204, 188, 235, 227, 234, 264, 302, 293, 259, 229, 203, 229,
242, 233, 267, 269, 270, 315, 364, 347, 312, 274, 237, 278,
284, 277, 317, 313, 318, 374, 413, 405, 355, 306, 271, 306,
315, 301, 356, 348, 355, 422, 465, 467, 404, 347, 305, 336,
340, 318, 362, 348, 363, 435, 491, 505, 404, 359, 310, 337,
360, 342, 406, 396, 420, 472, 548, 559, 463, 407, 362, 405,
417, 391, 419, 461, 472, 535, 622, 606, 508, 461, 390, 432])
>>> load_airpassengers(True).head()
0 112.0
1 118.0
2 132.0
3 129.0
4 121.0
dtype: float64
Notes
-----
This is monthly data, so *m* should be set to 12 when using in a seasonal
context.
References
----------
.. [1] Box, G. E. P., Jenkins, G. M. and Reinsel, G. C. (1976)
"Time Series Analysis, Forecasting and Control. Third Edition."
Holden-Day. Series G.
"""
rslt = np.array([
112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118,
115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140,
145, 150, 178, 163, 172, 178, 199, 199, 184, 162, 146, 166,
171, 180, 193, 181, 183, 218, 230, 242, 209, 191, 172, 194,
196, 196, 236, 235, 229, 243, 264, 272, 237, 211, 180, 201,
204, 188, 235, 227, 234, 264, 302, 293, 259, 229, 203, 229,
242, 233, 267, 269, 270, 315, 364, 347, 312, 274, 237, 278,
284, 277, 317, 313, 318, 374, 413, 405, 355, 306, 271, 306,
315, 301, 356, 348, 355, 422, 465, 467, 404, 347, 305, 336,
340, 318, 362, 348, 363, 435, 491, 505, 404, 359, 310, 337,
360, 342, 406, 396, 420, 472, 548, 559, 463, 407, 362, 405,
417, 391, 419, 461, 472, 535, 622, 606, 508, 461, 390, 432
]).astype(np.float64)
if as_series:
return pd.Series(rslt)
return rslt | Monthly airline passengers.
The classic Box & Jenkins airline data. Monthly totals of international
airline passengers, 1949 to 1960.
Parameters
----------
as_series : bool, optional (default=False)
Whether to return a Pandas series. If False, will return a 1d
numpy array.
Returns
-------
rslt : array-like, shape=(n_samples,)
The time series vector.
Examples
--------
>>> from pmdarima.datasets import load_airpassengers
>>> load_airpassengers() # doctest: +SKIP
np.array([
112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118,
115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140,
145, 150, 178, 163, 172, 178, 199, 199, 184, 162, 146, 166,
171, 180, 193, 181, 183, 218, 230, 242, 209, 191, 172, 194,
196, 196, 236, 235, 229, 243, 264, 272, 237, 211, 180, 201,
204, 188, 235, 227, 234, 264, 302, 293, 259, 229, 203, 229,
242, 233, 267, 269, 270, 315, 364, 347, 312, 274, 237, 278,
284, 277, 317, 313, 318, 374, 413, 405, 355, 306, 271, 306,
315, 301, 356, 348, 355, 422, 465, 467, 404, 347, 305, 336,
340, 318, 362, 348, 363, 435, 491, 505, 404, 359, 310, 337,
360, 342, 406, 396, 420, 472, 548, 559, 463, 407, 362, 405,
417, 391, 419, 461, 472, 535, 622, 606, 508, 461, 390, 432])
>>> load_airpassengers(True).head()
0 112.0
1 118.0
2 132.0
3 129.0
4 121.0
dtype: float64
Notes
-----
This is monthly data, so *m* should be set to 12 when using in a seasonal
context.
References
----------
.. [1] Box, G. E. P., Jenkins, G. M. and Reinsel, G. C. (1976)
"Time Series Analysis, Forecasting and Control. Third Edition."
Holden-Day. Series G. |
def _add_cadd_score(self, variant_obj, info_dict):
"""Add the cadd score to the variant
Args:
variant_obj (puzzle.models.Variant)
info_dict (dict): A info dictionary
"""
cadd_score = info_dict.get('CADD')
if cadd_score:
logger.debug("Updating cadd_score to: {0}".format(
cadd_score))
variant_obj.cadd_score = float(cadd_score) | Add the cadd score to the variant
Args:
variant_obj (puzzle.models.Variant)
info_dict (dict): A info dictionary |
def parse_device(lines):
"""Parse all the lines of a device block.
A device block is composed of a header line with the name of the device and
at least one extra line describing the device and its status. The extra
lines have a varying format depending on the status and personality of the
device (e.g. RAID1 vs RAID5, healthy vs recovery/resync).
"""
name, status_line, device = parse_device_header(lines.pop(0))
# There are edge cases when the device list is empty and the status line is
# merged with the header line, in those cases, the status line is returned
# from parse_device_header(), the rest of the time, it's the next line.
if not status_line:
status_line = lines.pop(0)
status = parse_device_status(status_line, device["personality"])
bitmap = None
resync = None
for line in lines:
if line.startswith(" bitmap:"):
bitmap = parse_device_bitmap(line)
elif line.startswith(" ["):
resync = parse_device_resync_progress(line)
elif line.startswith(" \tresync="):
resync = parse_device_resync_standby(line)
else:
raise NotImplementedError("unknown device line: {0}".format(line))
device.update({
"status": status,
"bitmap": bitmap,
"resync": resync,
})
return (name, device) | Parse all the lines of a device block.
A device block is composed of a header line with the name of the device and
at least one extra line describing the device and its status. The extra
lines have a varying format depending on the status and personality of the
device (e.g. RAID1 vs RAID5, healthy vs recovery/resync). |
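For orientation, a typical /proc/mdstat device block that this parser is aimed at looks roughly like the following (illustrative values only):

md0 : active raid1 sdb1[1] sda1[0]
      976630464 blocks super 1.2 [2/2] [UU]
      bitmap: 1/8 pages [4KB], 65536KB chunk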
def shell(self, name='default', user=None, password=None, root=0, verbose=1, write_password=1, no_db=0, no_pw=0):
"""
Opens a SQL shell to the given database, assuming the configured database
and user supports this feature.
"""
raise NotImplementedError | Opens a SQL shell to the given database, assuming the configured database
and user supports this feature. |
def course_or_program_exist(self, course_id, program_uuid):
"""
Return whether the input course or program exist.
"""
course_exists = course_id and CourseApiClient().get_course_details(course_id)
program_exists = program_uuid and CourseCatalogApiServiceClient().program_exists(program_uuid)
return course_exists or program_exists | Return whether the input course or program exist. |
def image_info(call=None, kwargs=None):
'''
Retrieves information for a given image. Either a name or an image_id must be
supplied.
.. versionadded:: 2016.3.0
name
The name of the image for which to gather information. Can be used instead
of ``image_id``.
image_id
The ID of the image for which to gather information. Can be used instead of
``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f image_info opennebula name=my-image
salt-cloud --function image_info opennebula image_id=5
'''
if call != 'function':
raise SaltCloudSystemExit(
'The image_info function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
image_id = kwargs.get('image_id', None)
if image_id:
if name:
log.warning(
'Both the \'image_id\' and \'name\' arguments were provided. '
'\'image_id\' will take precedence.'
)
elif name:
image_id = get_image_id(kwargs={'name': name})
else:
raise SaltCloudSystemExit(
'The image_info function requires either a \'name or an \'image_id\' '
'to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
info = {}
response = server.one.image.info(auth, int(image_id))[1]
tree = _get_xml(response)
info[tree.find('NAME').text] = _xml_to_dict(tree)
return info | Retrieves information for a given image. Either a name or an image_id must be
supplied.
.. versionadded:: 2016.3.0
name
The name of the image for which to gather information. Can be used instead
of ``image_id``.
image_id
The ID of the image for which to gather information. Can be used instead of
``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f image_info opennebula name=my-image
salt-cloud --function image_info opennebula image_id=5 |
def AddLeNetModel(model, data):
'''
This part is the standard LeNet model: from data to the softmax prediction.
For each convolutional layer we specify dim_in - number of input channels
and dim_out - number of output channels. Also each Conv and MaxPool layer changes the
image size. For example, kernel of size 5 reduces each side of an image by 4.
While when we have kernel and stride sizes equal 2 in a MaxPool layer, it divides
each side in half.
'''
# Image size: 28 x 28 -> 24 x 24
conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=20, kernel=5)
# Image size: 24 x 24 -> 12 x 12
pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
# Image size: 12 x 12 -> 8 x 8
conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=100, kernel=5)
# Image size: 8 x 8 -> 4 x 4
pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
# 100 * 4 * 4 stands for dim_out from the previous layer multiplied by the
# image size
fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
relu = brew.relu(model, fc3, fc3)
pred = brew.fc(model, relu, 'pred', 500, 10)
softmax = brew.softmax(model, pred, 'softmax')
return softmax | This part is the standard LeNet model: from data to the softmax prediction.
For each convolutional layer we specify dim_in - number of input channels
and dim_out - number of output channels. Also each Conv and MaxPool layer changes the
image size. For example, kernel of size 5 reduces each side of an image by 4.
While when we have kernel and stride sizes equal 2 in a MaxPool layer, it divides
each side in half. |
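The spatial sizes quoted in the comments above follow from simple arithmetic (valid 5x5 convolutions shave 4 pixels per dimension, 2x2/stride-2 pooling halves each side); a tiny check:

size = 28
size -= 4          # conv1, kernel 5 -> 24
size //= 2         # pool1           -> 12
size -= 4          # conv2, kernel 5 -> 8
size //= 2         # pool2           -> 4
print(size, 100 * size * size)   # 4 1600  == dim_in of fc3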
def mount(cls, device, mount_directory, fs=None, options=None, cmd_timeout=None, sudo=False):
""" Mount a device to mount directory
:param device: device to mount
:param mount_directory: target directory where the given device will be mounted to
:param fs: optional, filesystem on the specified device. If specifies - overrides OS filesystem \
detection with this value.
:param options: specifies mount options (OS/filesystem dependent)
:param cmd_timeout: if specified - timeout with which this mount command should be evaluated (if \
command isn't complete within the given timeout - an exception will be raised)
:param sudo: whether to use sudo to run mount command
:return: None
"""
cmd = [] if sudo is False else ['sudo']
cmd.extend(['mount', device, os.path.abspath(mount_directory)])
if fs is not None:
cmd.extend(['-t', fs])
if options is not None and len(options) > 0:
cmd.append('-o')
cmd.extend(options)
subprocess.check_output(cmd, timeout=cmd_timeout) | Mount a device to mount directory
:param device: device to mount
:param mount_directory: target directory where the given device will be mounted to
:param fs: optional, filesystem on the specified device. If specified, overrides OS filesystem \
detection with this value.
:param options: specifies mount options (OS/filesystem dependent)
:param cmd_timeout: if specified - timeout with which this mount command should be evaluated (if \
command isn't complete within the given timeout - an exception will be raised)
:param sudo: whether to use sudo to run mount command
:return: None |
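A side-effect-free sketch of the argv list that mount() ends up passing to subprocess.check_output; the device and mount point are made-up examples and nothing is executed here.

import os

def build_mount_cmd(device, mount_directory, fs=None, options=None, sudo=False):
    # mirrors the list construction above, without calling subprocess
    cmd = [] if sudo is False else ['sudo']
    cmd.extend(['mount', device, os.path.abspath(mount_directory)])
    if fs is not None:
        cmd.extend(['-t', fs])
    if options:
        cmd.append('-o')
        cmd.extend(options)
    return cmd

print(build_mount_cmd('/dev/sdb1', '/mnt/data', fs='ext4',
                      options=['ro', 'noatime'], sudo=True))
# on a POSIX path this prints:
# ['sudo', 'mount', '/dev/sdb1', '/mnt/data', '-t', 'ext4', '-o', 'ro', 'noatime']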
def _pool_one_shape(features_2d, area_width, area_height, batch_size,
width, height, depth, fn=tf.reduce_max, name=None):
"""Pools for an area in features_2d.
Args:
features_2d: a Tensor in a shape of [batch_size, height, width, depth].
area_width: the max width allowed for an area.
area_height: the max height allowed for an area.
batch_size: the batch size.
width: the width of the memory.
height: the height of the memory.
depth: the depth of the features.
fn: the TF function for the pooling.
name: the op name.
Returns:
pool_tensor: A Tensor of shape [batch_size, num_areas, depth]
"""
with tf.name_scope(name, default_name="pool_one_shape"):
images = []
for y_shift in range(area_height):
image_height = tf.maximum(height - area_height + 1 + y_shift, 0)
for x_shift in range(area_width):
image_width = tf.maximum(width - area_width + 1 + x_shift, 0)
area = features_2d[:, y_shift:image_height, x_shift:image_width, :]
flatten_area = tf.reshape(area, [batch_size, -1, depth, 1])
images.append(flatten_area)
image_tensor = tf.concat(images, axis=3)
max_tensor = fn(image_tensor, axis=3)
return max_tensor | Pools for an area in features_2d.
Args:
features_2d: a Tensor in a shape of [batch_size, height, width, depth].
area_width: the max width allowed for an area.
area_height: the max height allowed for an area.
batch_size: the batch size.
width: the width of the memory.
height: the height of the memory.
depth: the depth of the features.
fn: the TF function for the pooling.
name: the op name.
Returns:
pool_tensor: A Tensor of shape [batch_size, num_areas, depth] |
def _remove_prefix(name):
"""Strip the possible prefix 'Table: ' from one or more table names."""
if isinstance(name, str):
return _do_remove_prefix(name)
return [_do_remove_prefix(nm) for nm in name] | Strip the possible prefix 'Table: ' from one or more table names. |
def subst_quoted_strings(sql, params):
"""Reverse operation to mark_quoted_strings - substitutes '@' by params.
"""
parts = sql.split('@')
params_dont_match = "number of parameters doesn' match the transformed query"
assert len(parts) == len(params) + 1, params_dont_match # would be internal error
out = []
for i, param in enumerate(params):
out.append(parts[i])
out.append("'%s'" % param.replace('\\', '\\\\').replace("\'", "\\\'"))
out.append(parts[-1])
return ''.join(out) | Reverse operation to mark_quoted_strings - substitutes '@' by params. |
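mark_quoted_strings() is not part of this excerpt; a minimal counterpart (which ignores escaped quotes) plus a round trip through subst_quoted_strings() might look like this.

import re

def mark_quoted_strings_sketch(sql):
    # pull out '...' literals and replace each with the '@' placeholder
    params = re.findall(r"'([^']*)'", sql)
    return re.sub(r"'[^']*'", '@', sql), params

marked, params = mark_quoted_strings_sketch("SELECT * FROM t WHERE name = 'bob'")
print(marked, params)   # SELECT * FROM t WHERE name = @ ['bob']
print(subst_quoted_strings(marked, params))
# SELECT * FROM t WHERE name = 'bob'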
def get_name(self, use_alias=True):
"""
Gets the name to reference the sorted field
:return: the name to reference the sorted field
:rtype: str
"""
if self.desc:
direction = 'DESC'
else:
direction = 'ASC'
if use_alias:
return '{0} {1}'.format(self.field.get_identifier(), direction)
return '{0} {1}'.format(self.field.get_select_sql(), direction) | Gets the name to reference the sorted field
:return: the name to reference the sorted field
:rtype: str |
def combine(self, other, func, fill_value=None, overwrite=True):
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and return a Series or a
scalar. Used to merge the two dataframes column by columns.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
        the axes differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
            # don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
                # `col` exists only in `other`; after alignment the series in
                # `self` is all NaN, so try to cast it to other's dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns) | Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and return a Series or a
scalar. Used to merge the two dataframes column by columns.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
        the axes differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0 |
def to_method(func):
"""
Lift :func:`func` to a method; it will be called with the first argument
'self' ignored.
:param func: Any callable object
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""Wrapper function.
"""
return func(*args[1:], **kwargs)
return wrapper | Lift :func:`func` to a method; it will be called with the first argument
'self' ignored.
:param func: Any callable object |
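A short usage example; it only assumes `functools` is imported at module level, as the decorator above already requires:

def shout(text):
    return text.upper()

class Speaker(object):
    shout = to_method(shout)   # the wrapper drops the implicit 'self'

Speaker().shout('hello')       # -> 'HELLO'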
def verify_embedding(emb, source, target, ignore_errors=()):
"""A simple (exception-raising) diagnostic for minor embeddings.
See :func:`diagnose_embedding` for a more detailed diagnostic / more information.
Args:
emb (dict): a dictionary mapping source nodes to arrays of target nodes
source (graph or edgelist): the graph to be embedded
        target (graph or edgelist): the graph being embedded into
        ignore_errors (iterable of classes, optional): error classes from the list
            below that are suppressed instead of raised
Raises:
EmbeddingError: a catch-all class for the below
MissingChainError: in case a key is missing from `emb`, or the associated chain is empty
ChainOverlapError: in case two chains contain the same target node
DisconnectedChainError: in case a chain is disconnected
InvalidNodeError: in case a chain contains a node label not found in `target`
MissingEdgeError: in case a source edge is not represented by any target edges
Returns:
bool: True (if no exception is raised)
"""
for error in diagnose_embedding(emb, source, target):
eclass = error[0]
if eclass not in ignore_errors:
raise eclass(*error[1:])
return True | A simple (exception-raising) diagnostic for minor embeddings.
See :func:`diagnose_embedding` for a more detailed diagnostic / more information.
Args:
emb (dict): a dictionary mapping source nodes to arrays of target nodes
source (graph or edgelist): the graph to be embedded
        target (graph or edgelist): the graph being embedded into
        ignore_errors (iterable of classes, optional): error classes from the list
            below that are suppressed instead of raised
Raises:
EmbeddingError: a catch-all class for the below
MissingChainError: in case a key is missing from `emb`, or the associated chain is empty
ChainOverlapError: in case two chains contain the same target node
DisconnectedChainError: in case a chain is disconnected
InvalidNodeError: in case a chain contains a node label not found in `target`
MissingEdgeError: in case a source edge is not represented by any target edges
Returns:
bool: True (if no exception is raised) |
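An illustrative call with tiny, made-up graphs, assuming `diagnose_embedding` from the same module accepts plain edge lists:

source = [(0, 1), (1, 2)]                      # a 3-node path
target = [('a', 'b'), ('b', 'c'), ('c', 'd')]  # a 4-node path
emb = {0: ['a'], 1: ['b'], 2: ['c']}
verify_embedding(emb, source, target)          # -> True: chains are connected and
                                               #    every source edge is covered
# Passing ignore_errors=(MissingEdgeError,) would suppress that error class.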
def _range_from_slice(myslice, start=None, stop=None, step=None, length=None):
"""Convert a slice to an array of integers."""
assert isinstance(myslice, slice)
# Find 'step'.
step = myslice.step if myslice.step is not None else step
if step is None:
step = 1
# Find 'start'.
start = myslice.start if myslice.start is not None else start
if start is None:
start = 0
# Find 'stop' as a function of length if 'stop' is unspecified.
stop = myslice.stop if myslice.stop is not None else stop
if length is not None:
stop_inferred = floor(start + step * length)
if stop is not None and stop < stop_inferred:
raise ValueError("'stop' ({stop}) and ".format(stop=stop) +
"'length' ({length}) ".format(length=length) +
"are not compatible.")
stop = stop_inferred
if stop is None and length is None:
raise ValueError("'stop' and 'length' cannot be both unspecified.")
myrange = np.arange(start, stop, step)
# Check the length if it was specified.
if length is not None:
assert len(myrange) == length
return myrange | Convert a slice to an array of integers. |
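Quick checks of the helper (it relies on `np` and `floor` being available at module level, which is assumed here):

_range_from_slice(slice(None), start=2, stop=6)   # -> array([2, 3, 4, 5])
_range_from_slice(slice(1, None, 2), length=3)    # -> array([1, 3, 5])
_range_from_slice(slice(None, 10, 3))             # -> array([0, 3, 6, 9])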
def get_certificates(self):
"""Get user's certificates."""
for certificate in self.user_data.certificates:
certificate['datetime'] = certificate['datetime'].strip()
return self.user_data.certificates | Get user's certificates. |
def from_header(self, header):
"""Generate a SpanContext object using the trace context header.
        The enabled value parsed from the header is an int and needs to be
        converted to bool.
:type header: str
:param header: Trace context header which was extracted from the HTTP
request headers.
:rtype: :class:`~opencensus.trace.span_context.SpanContext`
:returns: SpanContext generated from the trace context header.
"""
if header is None:
return SpanContext()
try:
match = re.search(_TRACE_CONTEXT_HEADER_RE, header)
except TypeError:
logging.warning(
'Header should be str, got {}. Cannot parse the header.'
.format(header.__class__.__name__))
raise
if match:
trace_id = match.group(1)
span_id = match.group(3)
trace_options = match.group(5)
if trace_options is None:
trace_options = 1
span_context = SpanContext(
trace_id=trace_id,
span_id=span_id,
trace_options=TraceOptions(trace_options),
from_header=True)
return span_context
else:
logging.warning(
'Cannot parse the header {}, generate a new context instead.'
.format(header))
return SpanContext() | Generate a SpanContext object using the trace context header.
        The enabled value parsed from the header is an int and needs to be
        converted to bool.
:type header: str
:param header: Trace context header which was extracted from the HTTP
request headers.
:rtype: :class:`~opencensus.trace.span_context.SpanContext`
:returns: SpanContext generated from the trace context header. |
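A sketch of typical use, assuming this method lives on a propagator object and that the regex accepts a "TRACE_ID/SPAN_ID;o=OPTIONS" style header (the values below are made up):

header = '6e0c63257de34c92bf9efcd03927272e/1234567890;o=1'
span_context = propagator.from_header(header)   # `propagator` is an assumed instance
# span_context.trace_id -> '6e0c63257de34c92bf9efcd03927272e'
# span_context.span_id  -> '1234567890'
# A malformed or missing header falls back to a fresh SpanContext().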
def _render_template_block_nodelist(nodelist, block_name, context):
"""Recursively iterate over a node to find the wanted block."""
# Attempt to find the wanted block in the current template.
for node in nodelist:
# If the wanted block was found, return it.
if isinstance(node, BlockNode):
# No matter what, add this block to the rendering context.
context.render_context[BLOCK_CONTEXT_KEY].push(node.name, node)
# If the name matches, you're all set and we found the block!
if node.name == block_name:
return node.render(context)
# If a node has children, recurse into them. Based on
# django.template.base.Node.get_nodes_by_type.
for attr in node.child_nodelists:
try:
new_nodelist = getattr(node, attr)
except AttributeError:
continue
# Try to find the block recursively.
try:
return _render_template_block_nodelist(new_nodelist, block_name, context)
except BlockNotFound:
continue
# The wanted block_name was not found.
raise BlockNotFound("block with name '%s' does not exist" % block_name) | Recursively iterate over a node to find the wanted block. |
def f_contains(self, item, with_links=True, shortcuts=False, max_depth=None):
"""Checks if the node contains a specific parameter or result.
It is checked if the item can be found via the
:func:`~pypet.naturalnaming.NNGroupNode.f_get` method.
:param item: Parameter/Result name or instance.
If a parameter or result instance is supplied it is also checked if
the provided item and the found item are exactly the same instance, i.e.
`id(item)==id(found_item)`.
:param with_links:
If links are considered.
:param shortcuts:
            If `shortcuts` is `False` the name you supply must
be found in the tree WITHOUT hopping over nodes in between.
If `shortcuts=False` and you supply a
            non-colon-separated (short) name, then the name must be found
in the immediate children of your current node.
Otherwise searching via shortcuts is allowed.
:param max_depth:
            If `shortcuts` is `True` then the maximum search depth
can be specified. `None` means no limit.
:return: True or False
"""
# Check if an instance or a name was supplied by the user
try:
search_string = item.v_full_name
parent_full_name = self.v_full_name
if not search_string.startswith(parent_full_name):
return False
            if parent_full_name != '':
                search_string = search_string[len(parent_full_name) + 1:]
shortcuts = False # if we search for a particular item we do not allow shortcuts
except AttributeError:
search_string = item
item = None
if search_string == '':
            return False  # To allow searching for nodes with name = '', which are never part
# of the trajectory
try:
result = self.f_get(search_string,
shortcuts=shortcuts, max_depth=max_depth, with_links=with_links)
except AttributeError:
return False
if item is not None:
return id(item) == id(result)
else:
return True | Checks if the node contains a specific parameter or result.
It is checked if the item can be found via the
:func:`~pypet.naturalnaming.NNGroupNode.f_get` method.
:param item: Parameter/Result name or instance.
If a parameter or result instance is supplied it is also checked if
the provided item and the found item are exactly the same instance, i.e.
`id(item)==id(found_item)`.
:param with_links:
If links are considered.
:param shortcuts:
            If `shortcuts` is `False` the name you supply must
be found in the tree WITHOUT hopping over nodes in between.
If `shortcuts=False` and you supply a
            non-colon-separated (short) name, then the name must be found
in the immediate children of your current node.
Otherwise searching via shortcuts is allowed.
:param max_depth:
            If `shortcuts` is `True` then the maximum search depth
can be specified. `None` means no limit.
:return: True or False |
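A sketch with a pypet Trajectory; the parameter names and the 'parameters' prefix are assumptions based on pypet's usual tree layout:

from pypet import Trajectory

traj = Trajectory('demo')
traj.f_add_parameter('group.x', 42)
traj.f_contains('parameters.group.x')     # True: exact colon-separated path
traj.f_contains('x', shortcuts=True)      # True: found via shortcut search
traj.f_contains('y', shortcuts=True)      # False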
def _RunIpRoute(self, args=None, options=None):
"""Run a command with ip route and return the response.
Args:
args: list, the string ip route command args to execute.
options: dict, the string parameters to append to the ip route command.
Returns:
string, the standard output from the ip route command execution.
"""
args = args or []
options = options or {}
command = ['ip', 'route']
command.extend(args)
for item in options.items():
command.extend(item)
try:
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
except OSError as e:
self.logger.warning('Exception running %s. %s.', command, str(e))
else:
if process.returncode:
message = 'Non-zero exit status running %s. %s.'
self.logger.warning(message, command, stderr.strip())
else:
return stdout.decode('utf-8', 'replace')
return '' | Run a command with ip route and return the response.
Args:
args: list, the string ip route command args to execute.
options: dict, the string parameters to append to the ip route command.
Returns:
string, the standard output from the ip route command execution. |
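A pure-Python illustration of the command list the method assembles before handing it to subprocess (the route values are made up):

args = ['add', 'to', 'local', '10.128.0.2']
options = {'dev': 'eth0'}
command = ['ip', 'route'] + args
for item in options.items():
    command.extend(item)
command  # -> ['ip', 'route', 'add', 'to', 'local', '10.128.0.2', 'dev', 'eth0']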
def finalize(self, **kwargs):
"""
Finalize executes any remaining image modifications making it ready to show.
"""
# Set the aspect ratio to make the visualization square
# TODO: still unable to make plot square using make_axes_locatable
# x0,x1 = self.ax.get_xlim()
# y0,y1 = self.ax.get_ylim()
# self.ax.set_aspect(abs(x1-x0)/abs(y1-y0))
# Add the title to the plot if the user has set one.
self.set_title("")
# Set the legend with full opacity patches using manual legend.
# Or Add the colorbar if this is a continuous plot.
self.ax.legend(loc="best", frameon=True)
# Finalize the histograms
if self.hist:
plt.setp(self.xhax.get_xticklabels(), visible=False)
plt.setp(self.yhax.get_yticklabels(), visible=False)
plt.sca(self.ax)
# Call tight layout to maximize readability
plt.tight_layout() | Finalize executes any remaining image modifications making it ready to show. |