query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (sequencelengths 30-30) | negative_scores (sequencelengths 30-30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
Iterate through all accesses by ``for i in scope.accesses``. | def __iter__(self) -> Iterator[Access]:
for accesses in self._accesses.values():
for access in accesses:
yield access | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __iter__(self):\n while not self.accesses.empty():\n yield self.accesses.get()",
"def recursive_accesses(self, depth, max_depth=-1, external=True):\n # pylint: disable=not-an-iterable\n if max_depth == -1:\n max_depth = float('inf')\n\n for access in self.file_accesses:\n if external or access.is_internal:\n yield access\n if depth + 1 > max_depth:\n for act in self.activations:\n act_accesses = act.recursive_accesses(\n depth + 1, max_depth, external\n )\n for access in act_accesses:\n yield access",
"def accesses(self):\r\n return acc.Accesses(self)",
"def accesses(self):\r\n return acc.Accesses(self)",
"def references(self) -> Collection[Access]:\n # we don't want to publicly expose the mutable version of this\n return self.__accesses",
"def get_all_access(self):\n return self._access_lists.get_all_access()",
"def collectVariableAccesses(self, emit_read, emit_write):\n\n for element in self.subnode_statements:\n element.collectVariableAccesses(emit_read, emit_write)",
"def iter_sessions():\n return iter(_session_stack)",
"def remote_accesses(self):\n response = self._request(\"GET\", [ROUTE_REMOTE_ACCESSES])\n return CBWParser().parse_response(CBWRemoteAccess, response)",
"def collectVariableAccesses(self, emit_read, emit_write):\n\n self.subnode_iterator.collectVariableAccesses(emit_read, emit_write)",
"def collectVariableAccesses(self, emit_read, emit_write):\n\n self.subnode_iterated_length.collectVariableAccesses(emit_read, emit_write)",
"def get_access_list(self):\n return self.manager.get_access_list(self)",
"def get_all_access():\n\t# Get the email from the user making the request\n\temail = get_jwt_identity()\n\treturn get_all_access_helper(email)",
"def request_access(self):\n pass",
"def collectVariableAccesses(self, emit_read, emit_write):",
"def collectVariableAccesses(self, emit_read, emit_write):\n\n subnode_loop_body = self.subnode_loop_body\n\n if subnode_loop_body is not None:\n self.subnode_loop_body.collectVariableAccesses(emit_read, emit_write)",
"def ListAccessBindings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def __iter__(self) -> Iterator[BaseAssignment]:\n for assignments in self._assignments.values():\n for assignment in assignments:\n yield assignment",
"def iter_relocations(self):\n for i in range(self.num_relocations()):\n yield self.get_relocation(i)",
"def iter_context_objects(self):\n use_gevent = is_gevent_enabled()\n use_context = is_context_enabled()\n\n if use_context:\n tid = context_get_ident()\n elif use_gevent:\n tid = greenlet_get_ident()\n else:\n tid = thread_get_ident()\n\n objects = self._cache.get(tid)\n if objects is None:\n if len(self._cache) > _MAX_CONTEXT_OBJECT_CACHE:\n self._cache.clear()\n objects = self._global[:]\n objects.extend(getattr(self._thread_context, \"stack\", ()))\n\n if use_gevent:\n objects.extend(getattr(self._greenlet_context, \"stack\", ()))\n\n if use_context:\n objects.extend(self._context_stack.get([]))\n\n objects.sort(reverse=True)\n objects = [x[1] for x in objects]\n self._cache[tid] = objects\n return iter(objects)",
"def __iter__(self):\n it = self.ctx.Iterator3(\n self.addr,\n ScType.EdgeAccessConstPosPerm,\n ScType.Unknown)\n\n return Iterator(it)",
"def collectVariableAccesses(self, emit_read, emit_write):\n\n self.subnode_module.collectVariableAccesses(emit_read, emit_write)",
"def collectVariableAccesses(self, emit_read, emit_write):\n\n self.subnode_new_locals.collectVariableAccesses(emit_read, emit_write)",
"def __iter__(self):\n for key in self._ctx:\n yield key",
"def access_control_list(self, values):\n # pylint: disable=not-an-iterable\n if isinstance(values, dict):\n self.validate_acl_data(values)\n email_names = self.parse_sync_service_acl(values)\n from ggrc.utils import user_generator as ug\n existing_people = {\n p.email: p for p in ug.load_people_with_emails(email_names)\n }\n\n absent_emails = set(email_names) - set(existing_people)\n absent_users = {email: email_names[email] for email in absent_emails}\n new_people = {\n p.email: p for p in ug.create_users_with_role(absent_users)\n }\n all_acl_people = dict(existing_people, **new_people)\n\n for acl in self._access_control_list:\n users = values.get(acl.ac_role.name, [])\n people = {all_acl_people[user[\"email\"]] for user in users}\n acl.update_people(people)\n else:\n roleable.Roleable.access_control_list.fset(self, values)",
"def collectVariableAccesses(self, emit_read, emit_write):\n\n self.subnode_source.collectVariableAccesses(emit_read, emit_write)",
"def collectVariableAccesses(self, emit_read, emit_write):\n\n self.subnode_source.collectVariableAccesses(emit_read, emit_write)",
"def collectVariableAccesses(self, emit_read, emit_write):\n\n self.subnode_source.collectVariableAccesses(emit_read, emit_write)",
"def collectVariableAccesses(self, emit_read, emit_write):\n\n self.subnode_expression.collectVariableAccesses(emit_read, emit_write)",
"def collectVariableAccesses(self, emit_read, emit_write):\n\n self.subnode_expression.collectVariableAccesses(emit_read, emit_write)"
] | [
"0.72342896",
"0.6824232",
"0.654937",
"0.654937",
"0.61959404",
"0.56890184",
"0.5652105",
"0.56444854",
"0.55087644",
"0.54611427",
"0.54452384",
"0.5443628",
"0.53988117",
"0.537276",
"0.5348753",
"0.5241717",
"0.52374184",
"0.5205386",
"0.51431155",
"0.5113757",
"0.50758237",
"0.5072556",
"0.50454885",
"0.50380826",
"0.5037159",
"0.5028267",
"0.5028267",
"0.5028267",
"0.5016971",
"0.5016971"
] | 0.78320724 | 0 |
A helper function to retrieve simple name str from a CSTNode or str | def get_name_for(node: Union[str, cst.CSTNode]) -> Optional[str]:
if isinstance(node, cst.Name):
return node.value
elif isinstance(node, str):
return node
elif isinstance(node, cst.Call):
return _NameUtil.get_name_for(node.func)
elif isinstance(node, cst.Subscript):
return _NameUtil.get_name_for(node.value)
elif isinstance(node, (cst.FunctionDef, cst.ClassDef)):
return _NameUtil.get_name_for(node.name)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def name(node):\n\n return fst(node)",
"def get_name() -> str:",
"def get_name(node):\n if isinstance(node, ast.Name):\n return node.id",
"def nodeToShortName(node):\n\n pass",
"def get_name():",
"def nodeToLongName(node):\n\n pass",
"def node_getname( fdt, node_number_or_path ):\n\n name = \"\"\n try:\n node = fdt.get_node( node_number_or_path )\n name = node.name\n except:\n pass\n\n return name",
"def get_species_name(node_name_string):\n # Species code is the first part of leaf name (separated by an\n # underscore character)\n spcode = node_name_string.split(\"_\")[0:2]\n spcode = spcode[0]+'_'+spcode[1]\n # We could even translate the code to complete names\n return spcode",
"def m_getName(node_name=\"root\"):\n\n name = resolver.name(node_name=node_name)\n if name == None:\n print(\"No name assigned\")\n else:\n print(f\"Name: {name}\")\n\n return name",
"def get_mds_shortname(node):\n return str(node.getNodeName()).lower()",
"def get_name(node) -> str:\n if 'name' in node.attrib:\n return node.attrib['name']\n for elem in node:\n if elem.tag == 'name':\n return elem.text\n return ''",
"def getname(tree, accept_attr=True):\n if type(tree) is Name:\n return tree.id\n if type(tree) is Captured:\n return tree.name\n if accept_attr and type(tree) is Attribute:\n return tree.attr\n return None",
"def get_name() -> str:\n pass",
"def read_name(self):\n return self.node.read_name()",
"def read_name(self):\n return self.node.read_name()",
"def read_name(self):\n return self.node.read_name()",
"def read_name(self):\n return self.node.read_name()",
"def get_node_name(self, node):\n return node.name",
"def get_node_name(self):\n return util.join_names_underscore(self.name, str(self.as_pointer()))",
"def get_node_name(self, node):\n raise NotImplementedError()",
"def get_name(descr: str) -> str:\n return descr.split()[0]",
"def tname(self) -> str:",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getnodename(self, node_p):\n # (const node_bn* node)\n cnetica.GetNodeName_bn.argtypes = [c_void_p]\n cnetica.GetNodeName_bn.restype = c_char_p\n return cnetica.GetNodeName_bn(node_p) # name",
"def name(self):\n cSld = self._element.cSld\n return cSld.get('name', default='')"
] | [
"0.7268304",
"0.7049384",
"0.67698145",
"0.6742271",
"0.67241335",
"0.66804475",
"0.66666675",
"0.66269785",
"0.65419465",
"0.6449938",
"0.64347196",
"0.6416907",
"0.6346482",
"0.6314005",
"0.6314005",
"0.6314005",
"0.6314005",
"0.6242925",
"0.6222061",
"0.6213705",
"0.61930954",
"0.61709344",
"0.6134846",
"0.6134846",
"0.6134846",
"0.6134846",
"0.6134846",
"0.6134846",
"0.6130474",
"0.6115197"
] | 0.7702267 | 0 |
Returns true if ``node`` is part of the assignment at ``assignment_node``. Normally this is just a simple identity check, except for imports where the assignment is attached to the entire import statement but we are interested in ``Name`` nodes inside the statement. | def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:
if node is assignment_node:
return True
if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):
aliases = assignment_node.names
if isinstance(aliases, cst.ImportStar):
return False
for alias in aliases:
if alias.name is node:
return True
asname = alias.asname
if asname is not None:
if asname.name is node:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isAssignment(self):\n return _libsbml.Rule_isAssignment(self)",
"def is_assign_to_name(statement):\n return isinstance(statement, ast.Assign) and \\\n len(statement.targets) == 1 and \\\n isinstance(statement.targets[0], ast.Name)",
"def is_assignment(*args):\n return _ida_hexrays.is_assignment(*args)",
"def has_node(self, node: str) -> bool:\n return self.graph.has_node(node)",
"def has_node(self, name):\n return self.source_net.has_node(name)",
"def contains(self, node):\n return node == self.__node_a or node == self.__node_b",
"def is_identity(node: onnx.NodeProto) -> bool:\n return node.op_type == 'Identity'",
"def _assigns_typealias(node: nodes.NodeNG | None) -> bool:\n inferred = utils.safe_infer(node)\n if isinstance(inferred, nodes.ClassDef):\n if inferred.qname() == \".Union\":\n # Union is a special case because it can be used as a type alias\n # or as a type annotation. We only want to check the former.\n assert node is not None\n return not isinstance(node.parent, nodes.AnnAssign)\n elif isinstance(inferred, nodes.FunctionDef):\n if inferred.qname() == \"typing.TypeAlias\":\n return True\n return False",
"def __eq__(self, node):\n if node == None or self.element != node.element:\n return False\n return self.left == node.left and self.right == node.right",
"def visit_assignname( # pylint: disable=too-many-branches\n self, node: nodes.AssignName\n ) -> None:\n frame = node.frame(future=True)\n assign_type = node.assign_type()\n\n # Check names defined in comprehensions\n if isinstance(assign_type, nodes.Comprehension):\n self._check_name(\"inlinevar\", node.name, node)\n\n # Check names defined in module scope\n elif isinstance(frame, nodes.Module):\n # Check names defined in Assign nodes\n if isinstance(assign_type, nodes.Assign):\n inferred_assign_type = utils.safe_infer(assign_type.value)\n\n # Check TypeVar's and TypeAliases assigned alone or in tuple assignment\n if isinstance(node.parent, nodes.Assign):\n if self._assigns_typevar(assign_type.value):\n self._check_name(\"typevar\", assign_type.targets[0].name, node)\n return\n if self._assigns_typealias(assign_type.value):\n self._check_name(\"typealias\", assign_type.targets[0].name, node)\n return\n\n if (\n isinstance(node.parent, nodes.Tuple)\n and isinstance(assign_type.value, nodes.Tuple)\n # protect against unbalanced tuple unpacking\n and node.parent.elts.index(node) < len(assign_type.value.elts)\n ):\n assigner = assign_type.value.elts[node.parent.elts.index(node)]\n if self._assigns_typevar(assigner):\n self._check_name(\n \"typevar\",\n assign_type.targets[0]\n .elts[node.parent.elts.index(node)]\n .name,\n node,\n )\n return\n if self._assigns_typealias(assigner):\n self._check_name(\n \"typealias\",\n assign_type.targets[0]\n .elts[node.parent.elts.index(node)]\n .name,\n node,\n )\n return\n\n # Check classes (TypeVar's are classes so they need to be excluded first)\n elif isinstance(inferred_assign_type, nodes.ClassDef):\n self._check_name(\"class\", node.name, node)\n\n # Don't emit if the name redefines an import in an ImportError except handler.\n elif not _redefines_import(node) and isinstance(\n inferred_assign_type, nodes.Const\n ):\n self._check_name(\"const\", node.name, node)\n else:\n self._check_name(\n \"variable\", node.name, node, disallowed_check_only=True\n )\n\n # Check names defined in AnnAssign nodes\n elif isinstance(assign_type, nodes.AnnAssign):\n if utils.is_assign_name_annotated_with(node, \"Final\"):\n self._check_name(\"const\", node.name, node)\n elif self._assigns_typealias(assign_type.annotation):\n self._check_name(\"typealias\", node.name, node)\n\n # Check names defined in function scopes\n elif isinstance(frame, nodes.FunctionDef):\n # global introduced variable aren't in the function locals\n if node.name in frame and node.name not in frame.argnames():\n if not _redefines_import(node):\n self._check_name(\"variable\", node.name, node)\n\n # Check names defined in class scopes\n elif isinstance(frame, nodes.ClassDef):\n if not list(frame.local_attr_ancestors(node.name)):\n for ancestor in frame.ancestors():\n if utils.is_enum(ancestor) or utils.is_assign_name_annotated_with(\n node, \"Final\"\n ):\n self._check_name(\"class_const\", node.name, node)\n break\n else:\n self._check_name(\"class_attribute\", node.name, node)",
"def __eq__(self, node):\n return (self.entry == node.entry)",
"def consistent(self, assignment):\n for node1 in assignment:\n for node2 in assignment:\n\n if node1 != node2:\n #returns False if any assignmed words are the same\n if assignment[node1] == assignment[node2]:\n return False\n\n overlap= self.crossword.overlaps[node1,node2]\n if overlap != None:\n #checks if words assigned to node overlaps are the same letter\n if assignment[node1][overlap[0]] != assignment[node2][overlap[1]]:\n return False\n\n return True",
"def can_assign(self, user, task):\n # already assigned\n if task.owner_id:\n return False\n\n # user not logged in\n if user is None or user.is_anonymous:\n return False\n\n # available for everyone\n if not task.owner_permission:\n return True\n\n # User have the permission\n obj = None\n if self._owner_permission_obj:\n if callable(self._owner_permission_obj):\n obj = self._owner_permission_obj(task.process)\n else:\n obj = self._owner_permission_obj\n\n return user.has_perm(task.owner_permission, obj=obj)",
"def can_reevaluate(self, node):\n return isinstance(node, (ast.Name, ast.Num, ast.Str)) or \\\n (six.PY3 and isinstance(node, ast.Bytes)) or \\\n (ast_has_name_constant and isinstance(node, ast.NameConstant))",
"def node_exists(self, node_name: str) -> bool:\r\n return self.get_authentic_node_name(node_name) is not None",
"def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)",
"def visit_Assign(self, node: ast.Assign) -> None:\n # skip multiple assignments\n if len(node.targets) != 1:\n return\n\n # skip complex assignments\n if not isinstance(node.targets[0], ast.Name):\n return\n\n name = node.targets[0].id\n\n # skip private attributes\n if name.startswith(\"_\"):\n return\n\n self.attribute_nodes.append(node)",
"def visit_Assign(self, node):\n self.generic_visit(node)\n is_multiple = len(node.targets) > 1\n is_compound = any(map(is_sequence_node, node.targets))\n is_simple = not is_compound\n if is_simple and is_multiple:\n return self.visit_simple_assign(node)\n elif is_compound and (is_multiple or is_sequence_node(node.value)):\n return self.visit_compound_assign(node)\n return node",
"def __eq__(self, node):\n if node == None or self.element != node.element:\n return False\n return self.index == node.index",
"def __check_node_is_already_visited(self, node_name: str) -> bool:\r\n return next((True for visited_node in self.__priority_queue if visited_node.node_name == node_name), False)",
"def has_node(self, n):\n return n in self.node_dict",
"def from_node(self, a):\n return a == self.__node_a",
"def has_assignment_for(self, var):\n return self.variable_to_value.get(var) != None",
"def assignment(self):\n shards = self.line.split('=')\n if len(shards) == 2:\n return True",
"def is_member(self, node):\n return node in self._members",
"def user_is_assignmentadmin(userobj):\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Assignment)",
"def is_branch_node(tree, node):\n if node == tree.root:\n return tree.degree(node) >= 2\n else:\n return tree.degree(node) >= 3",
"def assignment_complete(self, assignment):\n # print(\"Entered assignment_complete Function\")\n for var in assignment:\n if assignment[var] is None:\n return False\n return self.consistent(assignment)\n\n # raise NotImplementedError",
"def assignment_complete(self, assignment):\n if len(assignment) == len(self.domains):\n return True\n\n else:\n return False",
"def __is_tree_node(self, node):\n if not node.input:\n if len(node.output) > 1:\n return False\n\n if len(node.output) > 1:\n return False\n\n for input_node in node.input:\n cls = self.__is_tree_node(input_node)\n if not cls:\n return False\n return True"
] | [
"0.62317604",
"0.60697824",
"0.5954752",
"0.5715725",
"0.55724084",
"0.55121505",
"0.5378449",
"0.5343535",
"0.53163487",
"0.5311576",
"0.53089356",
"0.52855116",
"0.5275552",
"0.5273492",
"0.52600914",
"0.52340776",
"0.520116",
"0.51995283",
"0.5169611",
"0.5149941",
"0.5117847",
"0.5106151",
"0.5078667",
"0.5066464",
"0.5021078",
"0.5006188",
"0.50053525",
"0.4992631",
"0.49865675",
"0.4939657"
] | 0.836117 | 0 |
Returns whether it successfully handled the string annotation | def _handle_string_annotation(
self, node: Union[cst.SimpleString, cst.ConcatenatedString]
) -> bool:
if (
self.__in_type_hint_stack[-1] or self.__in_annotation_stack[-1]
) and not self.__in_ignored_subscript:
value = node.evaluated_value
if value:
top_level_annotation = self.__last_string_annotation is None
if top_level_annotation:
self.__last_string_annotation = node
try:
mod = cst.parse_module(value)
mod.visit(self)
except cst.ParserSyntaxError:
# swallow string annotation parsing errors
# this is the same behavior as cPython
pass
if top_level_annotation:
self.__last_string_annotation = None
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_action_str(string: str) -> bool:",
"def is_string(self):\n answer = self._call('is_string')\n return answer.yes",
"def simple(self) -> bool:\n return is_simple(self.string)",
"def check(self, text):\n\n try:\n console.print(self.parser.parse(text)[\"result\"][1:], style=\"green\")\n return True\n\n except:\n console.print(\"An error has occurred while trying to parse the typo!\", style=\"red\")\n return False",
"def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False",
"def _get_str_checker(cls, expect_tp):\n def checker(v):\n caller_f = inspect.currentframe().f_back.f_back\n args = caller_f.f_locals['args']\n if (not args) or (expect_tp != type(args[0]).__name__):\n raise TypeError(\"string annotation can only be used \"\n \"for annotate class it self.\")\n tp = type(args[0])\n return isinstance(v, tp)\n return checker",
"def is_stringifier(self):\n return self._is_stringifier",
"def stringable(self):\n return True",
"def IsString(self, pos):\n style = self.GetStyleAt(pos)\n return self.FindTagById(style) in ('string_style', 'char_style')",
"def isRecognized(self, filename):\n if self.recognize(filename) is not None: return True\n else: return False",
"def _is_string(arg):\n return isinstance(arg, types.StringTypes)",
"def _handle_ellipsis(value: Any, annotation: Any) -> bool:\n return value == ...",
"def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )",
"def is_string(document):\r\n return isinstance(document, str)",
"def is_encoded(self,text):\n \n try:\n str(text)\n except:\n return False\n else:\n return True",
"def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string",
"def hasSubstring(self, s):\n node, off = self.followPath(s)\n return node is not None",
"def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)",
"def _handle_ok_ack(string):\n if string.strip() == Parser.OK_MSG:\n return True\n return False",
"def ISTEXT(value):\n return isinstance(value, (basestring, AltText))",
"def check_string():\n\n # Forcing check for valid json and headers with Content-Type:application/json\n content = request.get_json(silent=False, force=True)\n\n payload = content.get('data', None)\n \n if not payload:\n return response_handler(\n {\"error\": \"'data' key missing from JSON payload.\"},\n 400\n )\n if not isinstance(payload, basestring):\n return response_handler(\n {\"error\": \"Value of 'data' key is not of type 'string'.\"},\n 400\n )\n \n pangram = analyze_string(payload)\n if not pangram:\n return response_handler(\n {\"error\": False},\n 400\n )\n\n return response_handler(\n {\"success\": True},\n 200\n )",
"def is_string_type(self):\n raise exceptions.NotImplementedError()",
"def has_label(self, phrase_string: str) -> bool:\n return phrase_string in self.has_labels",
"def is_english (self, testing_string): \n try:\n self.testing_string.encode(encoding='utf-8').decode('ascii')\n except UnicodeDecodeError:\n return False\n else:\n return True",
"def does_signature_contain_str(signature=None):\n\n # if we did not receive a signature we assume the model could require\n # a string in it's input\n if signature is None:\n return True\n\n return any(v.dtype == dtypes.string.as_datatype_enum\n for v in signature.inputs.values())",
"def check_statement(self, statement):\n return isinstance(statement, str)",
"def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode",
"def IsValid(self):\n return len(self.Text) > 0",
"def is_word(self, word):\r\n\r\n return self.data(word) is not None",
"def is_string(obj):\n return isinstance(obj, str)"
] | [
"0.65544593",
"0.6309469",
"0.6091547",
"0.6053214",
"0.6043687",
"0.60382533",
"0.6006412",
"0.5999586",
"0.59370255",
"0.5861694",
"0.5861107",
"0.58280134",
"0.58182687",
"0.5812485",
"0.57880664",
"0.57711285",
"0.5747984",
"0.57046604",
"0.56872684",
"0.56801474",
"0.5679592",
"0.5659685",
"0.5648297",
"0.56431824",
"0.5640149",
"0.5638287",
"0.5613733",
"0.56059587",
"0.5600445",
"0.55980086"
] | 0.74615365 | 0 |
Test the reading of passwords. | def test_read(sqlite_db):
site = "www.example.com"
passwd = smm.read_passwd(site)
assert passwd == "TheNewPassword"
bad_request = smm.read_passwd("NotASite")
assert not bad_request | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True",
"def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)",
"def test_invalid_password(self):\n pass",
"def testPassword(cryptPass, dictionaryFile):\n #salt = cryptPass[0:2]\n salt = crypt.mksalt(crypt.METHOD_SHA512) # Updated for SHA512 encrypted passwords\n dictFile = open(dictionaryFile, 'r')\n for word in dictFile.readlines():\n word = word.strip('\\n')\n cryptWord = crypt.crypt(word, salt)\n \n if cryptWord == cryptPass:\n print('[+] Found Password: ' + word + '\\n')\n return\n print('[-] Password Not Found.\\n')\n return",
"def test_valid_password_valid():\n assert valid_password(\"123456\")\n assert valid_password(\"abcdef\")",
"def testRead(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',\n inode=self._IDENTIFIER_PASSWORDS_TXT, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestRead(file_object)",
"def testRead(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',\n inode=self._IDENTIFIER_PASSWORDS_TXT, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestRead(file_object)",
"def test_user1_method4():\n assert u.verify_password(USER_CREDENTIALS[\"password\"]), \"Password cannot verify properly\"",
"def get_password_testing():\n if settings.CLOUD:\n return [os.environ.get('passwordtest')]\n with open('env.yaml') as file_name:\n data = yaml.safe_load(file_name)\n return (data['test_variables']['password'],)",
"def test_set_user_password(self):\n pass",
"def test_integration():\n input_values = read_in_range('day04/input.txt')\n n_valid = count_valid_passwords(input_values)\n assert n_valid == 511",
"def test_password_validator(self):\n # Test with bad passwords\n pass_validator = self.validator.password_validator\n pass_list = mock_data['bad_pass']\n is_valid = pass_validator(pass_list[0])\n self.assertEqual(is_valid, 'Password must have eight characters')\n is_valid = pass_validator(pass_list[1])\n self.assertEqual(is_valid, 'Password must have a lowercase character')\n is_valid = pass_validator(pass_list[2])\n self.assertEqual(is_valid, 'Password must have an uppercase character')\n is_valid = pass_validator(pass_list[3])\n self.assertEqual(is_valid, 'Password must have a number')\n is_valid = pass_validator(pass_list[4])\n self.assertEqual(is_valid, 'Password must have one of this: _@*%!&$')\n is_valid = pass_validator(pass_list[5])\n self.assertEqual(is_valid, 'Password cannot have spaces')\n # Test with good password\n is_valid = pass_validator(mock_data['good_pass'])\n self.assertEqual(is_valid, True)",
"def _check_password(self, body):\n if not self.config.security_initialize:\n raise RuntimeError(\"First set a password\")\n\n password = hash_password(body[ATTR_PASSWORD])\n if password != self.config.security_password:\n raise RuntimeError(\"Wrong password\")",
"def test_aws_service_api_vm_password_get(self):\n pass",
"def test_random_password():\n output = sh.random_password()\n assert isinstance(output, str) is True\n assert len(output) == 16",
"def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)",
"def test_password_salts_are_random(self):\n self.user.password = '123456'\n self.user2.password = '123456'\n self.assertTrue(self.user.password_hash != self.user2.password_hash)",
"async def check_password(self, login, password):",
"async def password(self, ctx):\n pass",
"def test_check_password(self):\n user = User.query.filter_by(username='eschoppik').first()\n self.assertTrue(bcrypt.check_password_hash(user.password, 'secret'))\n self.assertFalse(bcrypt.check_password_hash(user.password, 'notsecret'))",
"def test_4_is_valid_password(self):\n self.factory = RequestFactory()\n response = self.factory.post('', data={'password': '123123123'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n response = is_valid_password(response)\n self.assertContains(response, 'The password is invalid')\n\n response = self.factory.post('', data={'password': '!pasword123'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n response = is_valid_password(response)\n self.assertContains(response, 'The password is correct')\n\n self.factory = RequestFactory()\n response = self.factory.post('', data={'password': '123123123'})",
"def test_password_is_okay():\n\twith pytest.raises(Exception):\n\t\tassert password_is_ok('qqqqqqqq') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\twith pytest.raises(Exception):\n\t\tassert password_is_ok('') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\t\"\"\"test that valid passwords work\"\"\"\n\tassert password_is_ok('Q8qqqqqqqq') == True\n\tassert password_is_ok('q8qqqqqqqq') == True\n\tassert password_is_ok('Qqqqqqqqqq') == True\n\tassert password_is_ok('qqqqqqqqqq') == True",
"def test_password_set(self):\r\n tst = User()\r\n tst.password = self.test_password\r\n\r\n self.assertEqual(\r\n len(tst.password),\r\n 60,\r\n \"Hashed should be 60 char long: \" + tst.password)\r\n self.assertEqual(\r\n '$2a$',\r\n tst.password[:4],\r\n \"Hash should start with the right complexity: \" + tst.password[:4])",
"def check_password(self, password):\n return self.password == password",
"def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)",
"def test_password_verification(self):\n self.user.password = '123456'\n self.assertTrue(self.user.verify_password('123456'))\n self.assertFalse(self.user.verify_password('password'))",
"def verify_password(self, password):\n return self.PASS == password",
"def test_password_strength_validator(self):\n self.assertIsNone(validate_password_strength('abcd123'))",
"def main(dictionaryFile, passwordFile):\n passFile = open(passwordFile)\n for line in passFile.readlines():\n if ':' in line:\n user = line.split(':')[0]\n cryptPass = line.split(':')[1].strip(' ')\n print('[*] Cracking Password For: ' + user)\n testPassword(cryptPass, dictionaryFile)",
"def test_password_field(self):\n field = self.record.find('field[@name=\\'password\\']')\n self.assertEqual(field.text, 'adt', 'Incorrect password Field')"
] | [
"0.729323",
"0.72582704",
"0.71649766",
"0.6846707",
"0.6810091",
"0.67917",
"0.67917",
"0.6777287",
"0.6760344",
"0.673974",
"0.67032176",
"0.6686708",
"0.6638984",
"0.66241276",
"0.6607445",
"0.6604479",
"0.65660244",
"0.6564443",
"0.65401316",
"0.65349746",
"0.65200394",
"0.6513057",
"0.64669245",
"0.6457122",
"0.6454174",
"0.6451952",
"0.6431571",
"0.6413607",
"0.64063686",
"0.6404856"
] | 0.7469244 | 0 |
Test the removal of passwords. | def test_removal(sqlite_db):
site = "www.example.com"
response = smm.remove_passwd(site)
assert response
bad_response = smm.remove_passwd(site)
assert not bad_response
assert not smm.read_passwd(site) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_invalid_password(self):\n pass",
"def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True",
"def delete_password(cls, media):\n for password in cls.passwords:\n if password.media.lower() == media.lower():\n cls.passwords.remove(password)",
"def removal_password(request):\n context = {'title': 'Profile Removal Password', 'password': settings.MDM_PASS, 'now': timezone.now()}\n return render(request, 'laptops/password.html', context)",
"def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)",
"def test_uniqueness(self):\n passwords = tuple(generate_password(8) for i in range(100))\n self.assertEqual(len(passwords), len(set(passwords)))",
"def test_reset_password(self):\n\n dietitian = Dietitian.query.get(1)\n reset_password(\"newpass\", dietitian)\n\n self.assertEqual(True, dietitian.check_password(\"newpass\"))",
"def test_allow_all_password_reuse(self):\r\n student_email, _ = self._setup_user()\r\n user = User.objects.get(email=student_email)\r\n\r\n err_msg = 'You are re-using a password that you have used recently.'\r\n\r\n token = default_token_generator.make_token(user)\r\n uidb36 = int_to_base36(user.id)\r\n\r\n # try to do a password reset with the same password as before\r\n resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {\r\n 'new_password1': 'foo',\r\n 'new_password2': 'foo'\r\n }, follow=True)\r\n\r\n self.assertNotIn(\r\n err_msg,\r\n resp.content\r\n )",
"def test_replacePasswordWrong(self):\n self.realm._txCryptContext, perform = getTestContext()\n account = self.realm.addAccount(\n self.localpart, self.domain, self.password)\n d = account.replacePassword(u'blahblah', u'blah')\n perform()\n perform()\n self.failureResultOf(d, errors.BadCredentials)",
"def delete_password(self) -> None:\n\n msg = QtWidgets.QMessageBox()\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\":/newPrefix/new.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n msg.setWindowIcon(QtGui.QIcon(icon))\n msg.setIcon(QtWidgets.QMessageBox.Warning)\n service = self.lineEdit_6.text()\n self.lineEdit_6.clear()\n if not service:\n msg.setWindowTitle(\"Store Password\")\n msg.setText(\"Please fill all fields.\")\n msg.exec_()\n else:\n service = service.capitalize()\n msg.setIcon(QtWidgets.QMessageBox.Information)\n if not fetch_password(self.uid):\n msg.setWindowTitle(\"Delete Password\")\n msg.setText(\"Your safe is empty.\")\n msg.exec_()\n elif check_service(self.uid, service):\n delete_password(self.uid, service)\n msg.setWindowTitle(\"Delete Password\")\n msg.setText(\"Password has been removed successfully.\")\n msg.exec_()\n else:\n msg.setWindowTitle(\"Delete Password\")\n msg.setText(\"Service doesn't exist. Go to 'View Passwords' to view your existing services and try again.\")\n msg.exec_()",
"def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)",
"def test_valid_password_valid():\n assert valid_password(\"123456\")\n assert valid_password(\"abcdef\")",
"def test_valid_password_invalid():\n assert not valid_password(\"\")\n assert not valid_password(\"1234567\")\n assert not valid_password(\"abcdefg\")",
"def test_disabled_too_frequent_password_resets(self):\r\n student = self._user_factory_with_history()\r\n\r\n self.assertFalse(PasswordHistory.is_password_reset_too_soon(student))",
"def test_password_salts_are_random(self):\n self.user.password = '123456'\n self.user2.password = '123456'\n self.assertTrue(self.user.password_hash != self.user2.password_hash)",
"def test_userdel():\n mock = MagicMock(return_value=\"Salt\")\n with patch.dict(htpasswd.__salt__, {\"cmd.run\": mock}), patch(\n \"os.path.exists\", MagicMock(return_value=True)\n ):\n assert htpasswd.userdel(\"/etc/httpd/htpasswd\", \"larry\") == [\"Salt\"]",
"def test_new_password(self):\n form_data = self.form_data(self.pwd)\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())",
"def test_view_all_incorrect_password(self):\n self.client.login(username='DELETE_USER', password='incorrect_password') # nosec -- this code runs in test only\n response = self.client.get(reverse('crt_forms:crt-forms-index'))\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, '/accounts/login/?next=/form/view')",
"def test_4_is_valid_password(self):\n self.factory = RequestFactory()\n response = self.factory.post('', data={'password': '123123123'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n response = is_valid_password(response)\n self.assertContains(response, 'The password is invalid')\n\n response = self.factory.post('', data={'password': '!pasword123'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n response = is_valid_password(response)\n self.assertContains(response, 'The password is correct')\n\n self.factory = RequestFactory()\n response = self.factory.post('', data={'password': '123123123'})",
"def test_delete_creds(self):\n self.new_credentials.save_creds()\n self.new_credentials.delete_creds()\n\n self.assertEqual(len(Credentials.credential_list),0)",
"def test_no_delete_entry_on_change_password_server_response(self):\n self.validate_no_deletes_entry_returned()",
"def test_invalid_password(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n rv = self.login('[email protected]', 'Bo1905')\n self.assertIn(b'Invalid password! Please try again', rv.data)",
"def test_password_verification(self):\n self.user.password = '123456'\n self.assertTrue(self.user.verify_password('123456'))\n self.assertFalse(self.user.verify_password('password'))",
"def test_incorrect_password(self):\n input = (\"admin\", \"\")\n if is_travis():\n self.login_test(*input, True)\n else:\n self.login_test(*input)",
"def clean_password_repeat(self):\n if 'password' in self.cleaned_data and 'password_repeat' in self.cleaned_data:\n if self.cleaned_data['password'] != self.cleaned_data['password_repeat']:\n raise forms.ValidationError('The password fields didn\\'t match: Password confirmation failed.')\n return self.cleaned_data['password_repeat']",
"def clean_password2(self):\n\t\tpassword = self.cleaned_data['password']\n\t\trepetir_password = self.cleaned_data['repetir_password']\n\t\tif password != repetir_password:\n\t\t\traise forms.ValidationError('Las contrasenas no coinciden.')\n\t\treturn repetir_password",
"def test_password_strength_validator(self):\n self.assertIsNone(validate_password_strength('abcd123'))",
"def test_user1_method4():\n assert u.verify_password(USER_CREDENTIALS[\"password\"]), \"Password cannot verify properly\"",
"def verify_password_input(self, root):\n\n ui = self.widgets['Password Entry'].get()\n\n if ui == self.password[0]:\n self.widgets['Login Window'].destroy()\n root.deiconify()\n menu.define_workspace(root)\n else:\n widgets.edit_message(self.widgets['Message'], self.msg_dict, 'er: Incorrect Password')\n self.widgets['Message'].pack(side=TOP, fill=X)\n self.widgets['Password Entry'].delete(0, END)",
"def test_replacePasswordCorrect(self):\n self.realm._txCryptContext, perform = getTestContext()\n account = self.realm.addAccount(\n self.localpart, self.domain, self.password)\n username = u'%s@%s' % (self.localpart, self.domain)\n d = account.replacePassword(self.password, u'blahblah')\n perform()\n perform()\n self.successResultOf(d)\n d = self._requestAvatarId(UsernamePassword(username, u'blahblah'))\n perform()\n self.assertEquals(self.successResultOf(d), account.storeID)\n d = self._requestAvatarId(UsernamePassword(username, self.password))\n perform()\n self.failureResultOf(d, UnauthorizedLogin)"
] | [
"0.6817206",
"0.666834",
"0.6652047",
"0.65106744",
"0.64689183",
"0.64474946",
"0.64468163",
"0.6384452",
"0.63637775",
"0.6348568",
"0.63416636",
"0.63272005",
"0.63171124",
"0.6305562",
"0.6297313",
"0.6289528",
"0.627948",
"0.6237605",
"0.6184281",
"0.61652255",
"0.6126862",
"0.6123407",
"0.6120188",
"0.6117468",
"0.6081847",
"0.6068307",
"0.6057853",
"0.6053856",
"0.60497004",
"0.60438174"
] | 0.78059477 | 0 |
inserts new course object into linked list | def insert(self, course):
new_node = Node(course)
if self.head is None or self.head.data.number() >= new_node.data.number():
new_node.next = self.head
self.head = new_node
self._size += 1
return
cur_node = self.head
while cur_node.next and cur_node.next.data.number() < new_node.data.number():
cur_node = cur_node.next
new_node.next = cur_node.next
cur_node.next = new_node
self._size += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add():\n prev_courses = Course._file.read_db()\n course_name = input(\"Please, type course name >\")\n # check course for uniqueness/ instantiating blank class with one attribute\n c = Course(course_name)\n if c.is_course_exists():\n print(\"{} is already exists\".format(course_name))\n return\n\n prev_courses[\"courses\"].append({\n \"course_name\": course_name,\n \"teacher\": input(\"Please, type teacher's email >\"),\n \"total_place\": int(input(\"Please, type total enrolled number >\")),\n \"students\": []\n })\n Course._file.write_db(prev_courses)\n print(\"New course - {} is added\".format(course_name))\n return",
"def add_course(self, course):\n if course == isinstance(course, list):\n self.courses.extend(course)\n else:\n self.courses.append(course)",
"def addCourse(self):\n\t\tcourseName = input(\"What is the new course name? \")\n\t\tcourseGrade = eval(input(\"What grade point did you get? \"))\n\t\tself.courses.append(Course(courseName,courseGrade))\n\t\tself.gpa = self.calculateGPA()",
"def save_course(self, course: Course) -> None:\n self.collection.insert_one(course.dict())",
"def add_course(self, course: 'Course'):\n\n new_diff = self._total_diff + course.difficulty\n new_diff_rating = new_diff / (len(self._courses) + 1)\n\n # Add course\n self._courses[course.get_course_code()] = course\n\n # Update attributes\n self._total_load += course.credit_load\n if self._subj_dist.get(course.subj) is None:\n self._subj_dist[course.subj] = 0\n self._subj_dist[course.subj] += 1\n self._total_diff = new_diff\n self._diff_rating = new_diff_rating",
"def insert_course_index(self, course_index):\r\n self.course_index.insert(course_index)",
"def associate(self, course):\n new_topics_courses = TopicsCourses()\n new_topics_courses.course = course\n self.topics_courses.append(new_topics_courses)\n return new_topics_courses",
"def save_course(self):\r\n self.course.save()\r\n self.store.update_item(self.course, self.user.id)",
"def set_course(self, new_course, updating=False):\n COURSE_QUERY = \"\"\"UPDATE Course SET subject_code = %s, credit_hours = %s, description = %s WHERE name = %s\"\"\" if updating \\\n else \"\"\"INSERT INTO Course (subject_code, credit_hours, description, name) VALUES (%s, %s, %s, %s)\"\"\"\n\n self.db_cursor.execute(COURSE_QUERY, (new_course.subject_code, new_course.credit_hours, new_course.description, new_course.name))\n self.db_connection.commit()\n\n # Add course topics and course goals:\n for ct_id in new_course.topics:\n self.set_course_topic(ct_id, new_course.name)\n for cg_id in new_course.goals:\n self.set_course_goal(cg_id, new_course.name)",
"def push_courses(self):\n self._model_stack.append(Course)\n return self",
"def add_course(graph, course, parent, color):\n if course[-1] == '*':\n # Concurrent course\n color = 'green'\n course = course[:-1]\n child = pydot.Node(course)\n graph.add_node(child)\n edge = pydot.Edge(parent, course, color=color)\n graph.add_edge(edge)",
"def add():\n add_form = AddCourseForm(request.form)\n if request.method == 'POST':\n Course.new(name=add_form.name.data,\n owner_id=g.user.id,\n visibility=add_form.visibility.data,\n term=add_form.term.data)\n flash('New course added')\n return redirect(url_for('courses.index'))\n return render_template('courses/add.html', add_form=add_form)",
"def add_course(self, course):\n if course in self.courses:\n raise NameError('This student is already enrolled in that course')\n else:\n self.courses[course] = 0\n\n return self",
"def add_to_prere(self, pre_course):\n self.prere[pre_course.get_course_id()] = pre_course",
"def add(self, course: Course, day: str) -> None:\n # if the day is not in the dictionary representing the\n # schedule, then create an empty list and add it\n if day not in self.schedule:\n day_list = []\n day_list.append(course)\n self.schedule[day] = day_list\n # if the day is already in the dictionary that represents the\n # schedule, then append the current course to the list\n # note that you do not need to re-assign the list to the\n # value in the dictionary because of the fact that the list\n # is stored as a reference which is updated through append\n else:\n current_day_list = self.schedule[day]\n current_day_list.append(course)",
"def add_course(self, course: 'Course', index: int):\n if index >= len(self._semesters):\n raise IndexError(\"Index given was beyond the bounds of self._semesters\")\n return self._semesters[index].add_course(course)",
"def register_course(self, **fields):\n if 'course_key' not in fields.keys():\n raise KeyError('Primary key is missing')\n existing_fields = [i.name for i in self._db.get_columns('courses')]\n needed_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n needed_fields[key] = value\n check = Courses.get_or_none(course_key=needed_fields['course_key'])\n if check is not None:\n return check\n new_course = Courses.get_or_create(**needed_fields)\n return new_course",
"def _update(self, course_name: str, newdata: ParseType) -> None:\n\n self.courses[course_name] = newdata",
"def edit_course(self, course):\n EDIT_COURSE = \"\"\"UPDATE Course SET subject_code = %s, credit_hours = %s, description = %s WHERE name = %s\"\"\"\n\n self.db_cursor.execute(EDIT_COURSE, (\n course.subject_code, course.credit_hours, course.description, course.name))\n self.db_connection.commit()\n\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_TOPICS = \"\"\"INSERT INTO CourseTopics (course_name, topic_id) VALUES (%s, %s)\"\"\"\n for ct in course.topics:\n self.db_cursor.execute(INSERT_COURSE_TOPICS, (course.name,ct))\n self.db_connection.commit()\n\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_GOALS = \"\"\"INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)\"\"\"\n for cg in course.goals:\n self.db_cursor.execute(INSERT_COURSE_GOALS, (course.name, cg))\n self.db_connection.commit()",
"def add_instance_of_course(self, semester, professor, crn, status):\n self.semesters.append(semester)\n self.professors.append(professor)\n self.statuses.append(status)\n self.instances[crn] = (semester, professor, status)",
"def _create_course(self):\r\n super(TestOrphan, self)._create_course()\r\n\r\n self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', 'runid')\r\n self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', 'runid')\r\n self._create_item('chapter', 'OrphanChapter', {}, {'display_name': 'Orphan Chapter'}, None, None)\r\n self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1')\r\n self._create_item('vertical', 'OrphanVert', {}, {'display_name': 'Orphan Vertical'}, None, None)\r\n self._create_item('html', 'Html1', \"<p>Goodbye</p>\", {'display_name': 'Parented Html'}, 'vertical', 'Vert1')\r\n self._create_item('html', 'OrphanHtml', \"<p>Hello</p>\", {'display_name': 'Orphan html'}, None, None)\r\n self._create_item('static_tab', 'staticuno', \"<p>tab</p>\", {'display_name': 'Tab uno'}, None, None)\r\n self._create_item('about', 'overview', \"<p>overview</p>\", {}, None, None)\r\n self._create_item('course_info', 'updates', \"<ol><li><h2>Sep 22</h2><p>test</p></li></ol>\", {}, None, None)",
"def post_add_link(self):\n course = courses.Course(self)\n link = course.add_link()\n link.href = ''\n course.save()\n self.redirect(self.get_action_url(\n 'edit_link', key=link.unit_id, extra_args={'is_newly_created': 1}))",
"def associate_topic(self, topic):\n new_topics_courses = TopicsCourses()\n new_topics_courses.topic = topic\n self.topics_courses.append(new_topics_courses)\n return new_topics_courses",
"def __init__(self, course_id, existing_entry):\r\n super(DuplicateCourseError, self).__init__()\r\n self.course_id = course_id\r\n self.existing_entry = existing_entry",
"def put(self):\n request = transforms.loads(self.request.get('request'))\n\n if not self.assert_xsrf_token_or_fail(\n request, 'import-course', {'key': None}):\n return\n\n if not CourseOutlineRights.can_edit(self):\n transforms.send_json_response(self, 401, 'Access denied.', {})\n return\n\n payload = request.get('payload')\n course_raw = transforms.json_to_dict(\n transforms.loads(payload), self.SCHEMA_DICT)['course']\n\n source = None\n for acourse in sites.get_all_courses():\n if acourse.raw == course_raw:\n source = acourse\n break\n\n if not source:\n transforms.send_json_response(\n self, 404, 'Object not found.', {'raw': course_raw})\n return\n\n course = courses.Course(self)\n errors = []\n try:\n course.import_from(source, errors)\n except Exception as e: # pylint: disable-msg=broad-except\n logging.exception(e)\n errors.append('Import failed: %s' % e)\n\n if errors:\n transforms.send_json_response(self, 412, '\\n'.join(errors))\n return\n\n course.save()\n transforms.send_json_response(self, 200, 'Imported.')",
"def put_child(course_id, section_id):\n\n section_url = SECTIONS_ENDPOINT + \"/\" + section_id\n course_url = COURSES_ENDPOINT + \"/\" + course_id\n\n # Get course from Parse\n course_connection = httplib.HTTPSConnection(PARSE_API_URL, PARSE_API_PORT)\n course_connection.connect()\n course_connection.request(\n method='GET',\n url=course_url,\n headers={\"X-Parse-Application-Id\": app_id, \"X-Parse-REST-API-Key\": rest_api_key}\n )\n course = json.loads(course_connection.getresponse().read())\n # Add Section id to Course's list\n if course and (section_id not in course[\"sections\"]):\n course[\"sections\"].append(section_id)\n\n # Persist new course\n course_connection.request(\n method='PUT',\n url=course_url,\n body=json.dumps(course),\n headers={\"X-Parse-Application-Id\": app_id, \"X-Parse-REST-API-Key\": rest_api_key}\n )",
"def add_to_postre(self, post_course):\n self.postre[post_course.get_course_id()] = post_course",
"def reload_course(self):\r\n self.course = self.store.get_course(self.course.id)",
"def add(self, ct):\n self[ct._id] = ct",
"def add_course(self, term, schedule, crn):\n query = {'Term': term.code,\n 'Schedule': schedule,\n 'CourseID': crn,\n 'ShowDebug': 0,\n '_': int(float(time.time()) * 10**3)}\n\n self.get(self.ADD_COURSE_ENDPOINT, params=query)"
] | [
"0.7108956",
"0.6971245",
"0.6751544",
"0.6639195",
"0.6605063",
"0.6451658",
"0.6447333",
"0.6298767",
"0.62383056",
"0.6146749",
"0.6086189",
"0.608164",
"0.6031581",
"0.60168093",
"0.5997468",
"0.5956669",
"0.5928258",
"0.5841721",
"0.580762",
"0.57517105",
"0.5742702",
"0.57315797",
"0.5648937",
"0.56286097",
"0.56153387",
"0.5614536",
"0.5612635",
"0.56064945",
"0.55980325",
"0.5542416"
] | 0.74338984 | 0 |
returns True if the list is sorted by Course Number, False otherwise | def is_sorted(self):
cur_list = []
cur_node = self.head
while cur_node is not None:
cur_list.append(cur_node.data.number())
cur_node = cur_node.next
if cur_list == sorted(cur_list):
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_sorted_list(list_):\n prev = -1\n for item in list_:\n if item < prev:\n return False\n prev = item\n return True",
"def is_sorted(l: list):\n for idx, num in enumerate(l):\n if idx is 0:\n continue\n elif l[idx-1] <= num:\n continue\n\n return idx\n\n\n return \"SORTED\"",
"def is_sorted(l):\n\tfor i in range(len(l) - 1):\n\t\tif l[i] > l[i + 1]:\n\t\t\treturn False\n\t\n\treturn True",
"def has_sorted(l):\n return all(l[i] <= l[i+1] for i in xrange(len(l)-1))",
"def is_sorted(self):\n previous = 0 # Setting to 0 shouldn't be an issue aslong as MIN_VALUE is at least 0\n for value in self.data:\n if value < previous:\n return False\n previous = value\n return True",
"def check_sorted(thelist):\n it = iter(thelist)\n next(it, None)\n return all(b >= a for a, b in zip(thelist, it))",
"def is_sorted(a_list):\n sorted_list = sorted(a_list)\n if sorted_list == a_list:\n return True\n else:\n return False",
"def is_ascending(lst):\n for i in range(len(lst) - 1):\n for j in range(i + 1, len(lst)):\n if lst[i] > lst[j]:\n return False\n return True",
"def is_sorted(self):\n\n return all(self.models[i].glb[iage] <= self.models[i+1].glb[iage] for i in range(len(self.models)-1))",
"def is_ascending2(lst):\n for i in range(len(lst) - 1):\n if lst[i] > lst[i + 1]:\n return False\n return True",
"def is_sorted(l):\n\tfor i in range(1, len(l)):\n\t\ttry:\n\t\t\tif l[i].lower() <= l[i-1].lower(): \t\t\t# ignore upper/lowercase with strings\n\t\t\t\treturn False\n\t\texcept: \t\t\t\t\t\t\t\t\t\t# if not strings\n\t\t\tif l[i] <= l[i-1]:\n\t\t\t\treturn False\n\treturn True",
"def check_sorted(self):\n last_count = np.inf\n for count in self.Nx:\n if count > last_count:\n self.sorted = False\n return self.sorted\n last_count = count\n self.sorted = True\n return self.sorted",
"def is_sorted(x):\n l = len(x)\n for i in range(l-1):\n if x[i+1] < x[i]:\n return False\n return True",
"def is_unique_and_sorted(self):\n return all((self(i) < self(i+1) for i in range(len(self)-1))) # all() returns true if all the items in the iterable are TRUE",
"def is_sorted(seq):\n return all(seq[i-1] < seq[i] for i in range(1, len(seq)))",
"def _can_sort(self, *pargs, **kwargs):\n return not bool(self._sort_callback(*pargs, **kwargs))",
"def has_sort(self) -> bool:\n if self.sort_criteria:\n if self.sort_criteria == MHR_NUMBER_PARAM or self.sort_criteria == REG_TYPE_PARAM or \\\n self.sort_criteria == REG_TS_PARAM or self.sort_criteria == CLIENT_REF_PARAM:\n return True\n if self.sort_criteria == SUBMITTING_NAME_PARAM or self.sort_criteria == OWNER_NAME_PARAM or \\\n self.sort_criteria == USER_NAME_PARAM or self.sort_criteria == STATUS_PARAM or \\\n self.sort_criteria == EXPIRY_DAYS_PARAM:\n return True\n return False",
"def check_sort(self):\n if self.list == []:\n return True\n seg_iter = iter(self.list)\n last = next(seg_iter)\n for segment in seg_iter:\n if last > segment:\n raise Exception('non trié')\n last = segment\n return True",
"def is_sorted(items):\n # TODO: Check that all adjacent items are in order, return early if so\n for x in range(len(items)):\n if x < len(items)-1:\n if items[x+1] < items[x]:\n return False\n return True",
"def course_tester(courses):\n\n return False",
"def isSorted(lyst):\n #Cute list comprehension way that doesn't short-circuit.\n #return len([x for x in\n # [a - b for a,b in zip(lyst[1:], lyst[0:-1])]\n # if x < 0]) == 0\n for i in range(1, len(lyst)):\n if lyst[i] < lyst[i-1]:\n return False\n return True",
"def eh_ordenada(l):\n lista= l.copy()\n if l == sorted (lista):\n return True , \"crescente\"\n elif l == sorted (lista,reverse=True):\n return True , \"decrescente\"\n else:\n return False , \"desordenada\"",
"def test_sort_order(self):\n obj = self.conn.search(self.basedn, 2, attrlist=['uidNumber'],\n sort_order=[\"-uidNumber\"])\n sort = [o['uidNumber'][0] for o in obj if 'uidNumber' in o]\n self.assertTrue((all(sort[i] >= sort[i+1]\n for i in range(len(sort)-1))), \"Not sorted\")",
"def is_sorted(self):\n return self._sorted",
"def is_sorted(A: list) -> bool:\r\n\r\n # If it's None, return None\r\n if A is None:\r\n return None\r\n\r\n # If the length is 0 or 1, then\r\n # it's sorted\r\n if len(A) <= 1:\r\n return True\r\n\r\n # If not, let's loop through\r\n curr = A[0]\r\n\r\n for i in range(1, len(A)):\r\n val = A[i]\r\n\r\n # Strictly increasing - so >=\r\n # If it was non-decreasing, then >\r\n if curr >= val:\r\n return False\r\n curr = val\r\n\r\n return True",
"def is_sorted(items):\n # Check that all adjacent items are in order, return early if so\n\n # RANGE \n current = 0\n right = 1\n while right < len(items):\n if items[current] > items[right]:\n return False\n else:\n current += 1\n right += 1\n return True",
"def is_sorted(array):\n if(all(array[i] <= array[i + 1] for i in range(len(array)-1))) or (all(array[i] >= array[i + 1] for i in range(len(array)-1))):\n return True\n return False",
"def _is_lexsorted(self) -> bool:\n return self._lexsort_depth == self.nlevels",
"def should_includes_be_sorted(self):\n return self._filter in ('includesort', 'uncrustify')",
"def is_sorted(some_list):\n # Check if element in list are sorted.\n check = True\n for i in range(len(some_list)):\n if i > 0: #if it's the first element in list, do not thing.\n try:\n if some_list[i] >= some_list[i-1]:\n pass\n else:\n check = False\n except:\n check = False\n return check"
] | [
"0.64845145",
"0.63505226",
"0.6258908",
"0.6216176",
"0.6167742",
"0.610778",
"0.6065763",
"0.60476375",
"0.60458887",
"0.5988047",
"0.5985882",
"0.59754354",
"0.59156555",
"0.5914994",
"0.58944833",
"0.5874584",
"0.5858344",
"0.58307153",
"0.57908547",
"0.57533205",
"0.5724962",
"0.5685796",
"0.5680363",
"0.5642734",
"0.5637038",
"0.5616043",
"0.5602649",
"0.55922407",
"0.5560098",
"0.5527731"
] | 0.6650408 | 0 |
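The negatives in the row above are all variations on one pattern: walk adjacent pairs and bail out on the first out-of-order pair. A minimal consolidated sketch of that pattern (illustrative only, not taken from any of the entries above):

```python
from typing import Any, Sequence

def is_sorted(seq: Sequence[Any], strict: bool = False) -> bool:
    """True if seq is in non-decreasing order (strictly increasing when strict=True)."""
    pairs = zip(seq, seq[1:])
    return all(a < b for a, b in pairs) if strict else all(a <= b for a, b in pairs)

# Example usage
assert is_sorted([1, 2, 2, 3])
assert not is_sorted([1, 2, 2, 3], strict=True)
assert is_sorted([])          # an empty sequence is trivially sorted
assert is_sorted(["a", "b"])  # works for any comparable elements
```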
Checks that the right number of sections exist. The prologue before the first section is 0, while subsequent ones are 1, 2, 3, etc. So if you have 3 sections in your code plus the prologue, you should pass in 3 and not 4 to verify that all of them exist. | def check_section_exists(section_number, report=None):
if report is None:
report = MAIN_REPORT
if not report['source']['success']:
return False
found = int((len(report['source']['sections']) - 1) / 2)
if section_number > found:
report.attach('Syntax error', category='Syntax', tool='Source',
group=report['source']['section'],
mistake=("Incorrect number of sections in your file. "
"Expected {count}, but only found {found}"
).format(count=section_number, found=found)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testSectionCount(self):\n\n self.sectionCount(3640)",
"def check_pe_sections(self, pe):\n res = []\n for section in pe.sections:\n if b\"!This program cannot be run in DOS mode\" in section.get_data()[:400] or\\\n b\"This program must be run under Win32\" in section.get_data()[:400]:\n res.append(section.Name.decode('utf-8').strip('\\x00'))\n\n if len(res) > 0:\n print(\"[+] PE header in sections %s\" % \" \".join(res))\n return True\n return False",
"def _check_nentries(sections, num_def, dat_def):\n print \" * Checking %s\" % num_def\n nentries = int(sections[num_def][\"arguments\"].split()[0])\n if nentries == len(sections[dat_def][\"data\"]):\n return True\n return False",
"def sanity_check_section(self):\n # Note: all addresses here are RVAs\n image_size = self.obj_parent.OptionalHeader.SizeOfImage\n if self.VirtualAddress > image_size:\n raise exceptions.SanityCheckException('VirtualAddress {0:08x} is past the end of image.'.format(self.VirtualAddress))\n if self.Misc.VirtualSize > image_size:\n raise exceptions.SanityCheckException('VirtualSize {0:08x} is larger than image size.'.format(self.Misc.VirtualSize))\n if self.SizeOfRawData > image_size:\n raise exceptions.SanityCheckException('SizeOfRawData {0:08x} is larger than image size.'.format(self.SizeOfRawData))",
"def multiple_sections(): # noqa: D416",
"def test_no_section_by_section(self):\n notice = {\n \"document_number\": \"111-22\",\n \"fr_volume\": 22,\n \"cfr_part\": \"100\",\n \"publication_date\": \"2010-10-10\"\n }\n s = SectionBySection(None, notices=[notice])\n self.assertEqual(None, s.process(Node(label=['100', '22'])))",
"def check_sections(filenames, sections):\n for section in sections:\n # Make sure the path ends with a /\n if not section.endswith(\"/\"):\n section += \"/\"\n pattern = section.replace(\"/\", r\"\\/\") + r\"\\d+.*\"\n for fname in filenames:\n match = re.match(pattern, fname)\n if match is not None:\n return fname\n return False",
"def get_section_number() -> int:\n section_num = input('Enter a section number (1 - 4): ')\n while not (section_num.isdigit() and wf.is_valid_section(int(section_num))):\n print('Invalid section number!')\n section_num = input('Enter a section number (1 - 4): ')\n return int(section_num)",
"def _check_required_section_found(self, docstring: PetscDocStringImpl) -> None:\n if not self and self.required:\n diag = self.diags.section_header_missing\n mess = f'Required section \\'{self.titles[0]}\\' not found'\n docstring.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, diag, mess, docstring.extent, highlight=False\n )\n return",
"def has_section(self, section):\n raise NotImplementedError()",
"def number_of_sections(self):\n #print (len(self.config.sections()))\n return len(self.config.sections())",
"def get_section_hint(state: str) -> int:\n section_nums = [i + 1 for i in range(len(state) // wf.SECTION_LENGTH)]\n random.shuffle(section_nums)\n for section_num in section_nums:\n if not wf.check_section(state, section_num):\n return section_num\n return 0 # should never get here",
"def _check_section_is_not_barren(self, docstring: PetscDocStringImpl) -> None:\n if self and self.barren():\n diag = self.diags.section_barren\n highlight = len(self.lines()) == 1\n mess = 'Section appears to be empty; while I\\'m all for a good mystery, you should probably elaborate here'\n docstring.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, diag, mess, self.extent, highlight=highlight\n )\n return",
"def is_section_exist(self, section_name: str) -> bool:\n pass",
"def test_sections_flat_no_root_section(self):\n title = (\"Neighborhood Outreach for I-70 Alignment Impacting \"\n \"Elyria, Globeville and Swansea\")\n summary = \"\"\"\n The City of Denver and Colorado Department of Transportation \n (CDOT) are working together to do neighborhood outreach\n regarding the I-70 alignment between Brighton Boulevard and\n Colorado. For detailed information on the neighborhood outreach\n efforts please visit www.DenverGov.org/ccdI70.\n \"\"\"\n byline = \"Denver Public Works and CDOT\"\n story = create_story(title=title, summary=summary, byline=byline)\n section1 = create_section(title=\"Background and context\",\n story=story,\n root=False)\n self.assertEqual(story.structure.sections_flat, [])",
"def check_nfaces(sections):\n return _check_nentries(sections, \"NFACES\", \"FACES\")",
"def number_of_sections(self):\n sections = self.config.sections()\n return len(sections)",
"def test_sections_flat_no_root_section(self):\n title = (\"Neighborhood Outreach for I-70 Alignment Impacting \"\n \"Elyria, Globeville and Swansea\")\n summary = \"\"\"\n The City of Denver and Colorado Department of Transportation \n (CDOT) are working together to do neighborhood outreach\n regarding the I-70 alignment between Brighton Boulevard and\n Colorado. For detailed information on the neighborhood outreach\n efforts please visit www.DenverGov.org/ccdI70.\n \"\"\"\n byline = \"Denver Public Works and CDOT\"\n story = create_story(title=title, summary=summary, byline=byline)\n layout = SectionLayout.objects.get(sectionlayouttranslation__name=\"Side by Side\")\n section1 = create_section(title=\"Background and context\",\n story=story, layout=layout,\n root=False)\n self.assertEqual(story.structure.sections_flat, [])",
"def test_sections_flat_one_section(self):\n title = (\"Neighborhood Outreach for I-70 Alignment Impacting \"\n \"Elyria, Globeville and Swansea\")\n summary = \"\"\"\n The City of Denver and Colorado Department of Transportation \n (CDOT) are working together to do neighborhood outreach\n regarding the I-70 alignment between Brighton Boulevard and\n Colorado. For detailed information on the neighborhood outreach\n efforts please visit www.DenverGov.org/ccdI70.\n \"\"\"\n byline = \"Denver Public Works and CDOT\"\n story = create_story(title=title, summary=summary, byline=byline)\n section1 = create_section(title=\"Background and context\",\n story=story,\n root=True)\n self.assertEqual(story.structure.sections_flat, [section1])",
"def section_4_8():\n pass",
"def _check_size_of_lists(sequence_header, secstr_header):\n if len(sequence_header) != len(sequence):\n sys.exit(\"The size of the sequence list and sequence header doesn't match\")\n else:\n return True",
"def check_dataobjs(config, modname, moddict, dataobjs, indent=''):\n\n cnts = [0] * NUMCNTS\n\n # check every file\n if pfwdefs.SW_FILESECT in moddict:\n print \"%sChecking %s section...\" % (indent, pfwdefs.SW_FILESECT)\n for fname, fdict in moddict[pfwdefs.SW_FILESECT].items():\n key = '%s.%s' % (pfwdefs.SW_FILESECT, fname)\n if key not in dataobjs[pfwdefs.SW_INPUTS] and \\\n key not in dataobjs[pfwdefs.SW_OUTPUTS] and \\\n ('listonly' not in fdict or not miscutils.convertBool(fdict['listonly'])):\n warning(indent + ' ', \"%s.%s does not appear in provenance lines\" % \\\n (pfwdefs.SW_FILESECT, fname))\n cnts[WARNCNT_POS] += 1\n\n if key in dataobjs[pfwdefs.SW_INPUTS]:\n cnts2 = check_file_valid_input(config, modname,\n fname, fdict, indent+' ')\n cnts = [x + y for x, y in zip(cnts, cnts2)] # increment counts\n\n\n if key in dataobjs[pfwdefs.SW_OUTPUTS]:\n cnts2 = check_file_valid_output(config, modname,\n fname, fdict, indent+' ')\n cnts = [x + y for x, y in zip(cnts, cnts2)] # increment counts\n\n\n # check every list\n if pfwdefs.SW_LISTSECT in moddict:\n print \"%sChecking %s section...\" % (indent, pfwdefs.SW_LISTSECT)\n for lname, ldict in moddict[pfwdefs.SW_LISTSECT].items():\n key = '%s.%s' % (pfwdefs.SW_LISTSECT, lname)\n if key not in dataobjs[pfwdefs.SW_INPUTS] and \\\n key not in dataobjs[pfwdefs.SW_OUTPUTS]:\n found = False\n if 'columns' in ldict:\n for col in ldict['columns'].split(','):\n nkey = key + \".\" + col\n nkey = nkey.replace('.fullname', '')\n if nkey in dataobjs[pfwdefs.SW_INPUTS] or \\\n nkey in dataobjs[pfwdefs.SW_OUTPUTS]:\n found = True\n # check to see if list def has file name\n if not found:\n nkey = col\n nkey = 'file.' + nkey.replace('.fullname', '')\n if nkey in dataobjs[pfwdefs.SW_INPUTS] or \\\n nkey in dataobjs[pfwdefs.SW_OUTPUTS]:\n found = True\n\n if not found:\n warning(indent + ' ', \"%s.%s does not appear in provenance lines\" % \\\n (pfwdefs.SW_LISTSECT, lname))\n cnts[WARNCNT_POS] += 1\n\n if key in dataobjs[pfwdefs.SW_INPUTS]:\n cnts2 = check_list_valid_input(modname,\n lname, ldict, indent+' ')\n cnts = [x + y for x, y in zip(cnts, cnts2)] # increment counts\n\n return cnts",
"def test_sections_flat_one_section(self):\n title = (\"Neighborhood Outreach for I-70 Alignment Impacting \"\n \"Elyria, Globeville and Swansea\")\n summary = \"\"\"\n The City of Denver and Colorado Department of Transportation \n (CDOT) are working together to do neighborhood outreach\n regarding the I-70 alignment between Brighton Boulevard and\n Colorado. For detailed information on the neighborhood outreach\n efforts please visit www.DenverGov.org/ccdI70.\n \"\"\"\n byline = \"Denver Public Works and CDOT\"\n story = create_story(title=title, summary=summary, byline=byline)\n layout = SectionLayout.objects.get(sectionlayouttranslation__name=\"Side by Side\")\n section1 = create_section(title=\"Background and context\",\n story=story,\n layout=layout,\n root=True)\n self.assertEqual(story.structure.sections_flat, [section1])",
"def load_sections():\n pass",
"def section(code, command, not_read = []):\n sections = list()\n section_str = list()\n present = False\n avoid = False\n times = []\n count_times=0\n times_end=[]\n count_times_end=0\n for i, line in enumerate(code):\n if find_command(line.lower(), command) != None and find_command(line.lower(), 'end') == None and not avoid:\n\n present = True\n count_times=count_times+1\n times.append(count_times)\n elif ((find_command(line.lower(), command) != None and find_command(line.lower(), 'end') != None) or \\\n find_command(line.lower(), 'end'+command) != None) and not avoid:\n\n present = True\n count_times_end=count_times_end+1\n times_end.append(count_times_end) \n if times == times_end:\n present = False\n section_str.append(line) \n sections.append(section_str)\n section_str = list()\n elif not present and any(find_command(line.lower(), avoid_command) != None for avoid_command in not_read) and \\\n find_command(line.lower(), 'end') == None:\n\n avoid = True\n count_times=count_times+1\n times.append(count_times)\n elif not present and ((any(find_command(line.lower(), avoid_command) != None for avoid_command in not_read) and \\\n find_command(line.lower(), 'end') != None) or \\\n any(find_command(line.lower(), 'end'+avoid_command) != None for avoid_command in not_read)):\n\n avoid = True\n count_times_end=count_times_end+1\n times_end.append(count_times_end) \n if times == times_end:\n avoid = False\n if present:\n section_str.append(line)\n return sections",
"def test_sections_flat_no_sections(self):\n title = (\"Neighborhood Outreach for I-70 Alignment Impacting \"\n \"Elyria, Globeville and Swansea\")\n summary = \"\"\"\n The City of Denver and Colorado Department of Transportation \n (CDOT) are working together to do neighborhood outreach\n regarding the I-70 alignment between Brighton Boulevard and\n Colorado. For detailed information on the neighborhood outreach\n efforts please visit www.DenverGov.org/ccdI70.\n \"\"\"\n byline = \"Denver Public Works and CDOT\"\n story = create_story(title=title, summary=summary, byline=byline)\n self.assertEqual(story.structure.sections_flat, [])",
"def test_sections_flat_no_sections(self):\n title = (\"Neighborhood Outreach for I-70 Alignment Impacting \"\n \"Elyria, Globeville and Swansea\")\n summary = \"\"\"\n The City of Denver and Colorado Department of Transportation \n (CDOT) are working together to do neighborhood outreach\n regarding the I-70 alignment between Brighton Boulevard and\n Colorado. For detailed information on the neighborhood outreach\n efforts please visit www.DenverGov.org/ccdI70.\n \"\"\"\n byline = \"Denver Public Works and CDOT\"\n story = create_story(title=title, summary=summary, byline=byline)\n self.assertEqual(story.structure.sections_flat, [])",
"def is_section(line: str) -> bool:\n return len(line) > 0 and (line[0] == '[' and line[len(line) - 1] == ']')",
"def sectional_overwrite_check(self):\n\n for rule in self.options['sectional_overwrite']:\n if self.lineage_test(rule):\n return True\n return False",
"def consecutive_sections(): # noqa: D416"
] | [
"0.6215828",
"0.61386",
"0.578303",
"0.5665832",
"0.56310284",
"0.56031877",
"0.55833596",
"0.5556951",
"0.5545726",
"0.5543077",
"0.5448445",
"0.54293984",
"0.53988886",
"0.53918827",
"0.53827995",
"0.53756946",
"0.5323125",
"0.52977574",
"0.52821344",
"0.5256953",
"0.5239065",
"0.5238089",
"0.52084976",
"0.5163083",
"0.5141528",
"0.51157355",
"0.51157355",
"0.50882447",
"0.50623137",
"0.50358117"
] | 0.6235399 | 0 |
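In the check_section_exists entry above, the section count is recovered as (len(sections) - 1) / 2, which presumes the parsed source alternates separator and body entries after an initial prologue. A small standalone sketch of that arithmetic; the exact list layout is an assumption made for illustration, not taken from the original tool:

```python
def count_sections(parsed_sections):
    """Number of numbered sections in a parsed source.

    Assumes the layout [prologue, separator_1, body_1, separator_2, body_2, ...],
    so each section past the prologue contributes two entries.
    """
    return (len(parsed_sections) - 1) // 2

# A prologue plus three sections -> 7 entries -> 3 sections
parsed = ["prologue", "##### 1", "body 1", "##### 2", "body 2", "##### 3", "body 3"]
assert count_sections(parsed) == 3
assert count_sections(["prologue"]) == 0
```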
Returns a xapian index document built from the context. Introspecting the context's schema interfaces provides the relevant fields. | def document(self, connection):
doc = xappy.UnprocessedDocument()
for iface in providedBy(self.context):
for field in schema.getFields(iface).values():
if not isinstance(field, (schema.Text, schema.ASCII)):
continue
value = field.query(self.context)
if value is None:
value = u''
if not isinstance(value, (str, unicode)):
value = unicode(value)
doc.fields.append(xappy.Field(field.__name__, value))
return doc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_index(self, uid: str) -> Index:\n return Index(self.config, uid).fetch_info()",
"def as_search_document(self, index='_all'):\n raise NotImplementedError(\n \"{} does not implement 'get_search_document'.\".format(self.__class__.__name__)\n )",
"def document_indexer(self):\n return self.indexer_by_pid_type(DOCUMENT_PID_TYPE)",
"def index(self):\n return dict(data='index')",
"def index(self):\n return self.container['index']",
"def fetch_search_document(self, index):\n assert self.pk, \"Object must have a primary key before being indexed.\"\n client = get_client()\n return client.get(\n index=index,\n doc_type=self.search_doc_type,\n id=self.pk\n )",
"def get_raw_index(self, uid: str) -> Dict[str, Any]:\n return self.http.get(f'{self.config.paths.index}/{uid}')",
"def document_view(index_name, doc_type, doc_id):\n resp = es.get(index=index_name, doc_type=doc_type, id=doc_id)\n document = resp[\"_source\"]\n print(document)",
"def __init__(self, index, document_id, client=None):\n self.index = index\n self.document_id = document_id\n self.client = client or current_search_client",
"def __init__(self, using=None, index=None, doc_type=None, extra=None):\n self._using = using\n\n self._index = None\n if isinstance(index, (tuple, list)):\n self._index = list(index)\n elif index:\n self._index = [index]\n\n self._doc_type = None\n if isinstance(doc_type, (tuple, list)):\n self._doc_type = list(doc_type)\n elif doc_type:\n self._doc_type = [doc_type]\n\n self.query = ProxyQuery(self, 'query')\n self.filter = ProxyFilter(self, 'filter')\n self.post_filter = ProxyFilter(self, 'post_filter')\n self.aggs = AggsProxy(self)\n self._sort = []\n self._extra = extra or {}\n self._params = {}",
"def index(self):\n return self._index",
"def get_document(obj):\n try:\n return ES.get(\n index=obj.get_index_name(), doc_type=obj.get_document_type(), id=obj.pk)\n except NotFoundError:\n raise DocumentNotFound(obj.get_index_name(), obj.pk)",
"def index(cls, db):\n return (Todo(**doc) for doc in cls.collection(db).find())",
"def get_index(self):\n return self.index",
"def get_index(self):\n return self.index",
"def _index(self):\n return es.index(CLUSTER_NAME, 'record', self.dict, id=self.uuid)",
"def index(self):\n return self._index",
"def index(self):\n return self._index",
"def index(self):\n return self._index",
"def index(self):\n return self._index",
"def index(self):\n return self._index",
"def index(self):\n return self._index",
"def index(self):\n return self._index",
"def index(self):\n return self._index",
"def index(self):\n return self._index",
"def index(self):\n return self._index",
"def index(self):\n return self._index",
"def index_documents(self, DomainName: str) -> Dict:\n pass",
"def get(self, index, id):\n url = f'{self.host}{index}/_doc/{id}'\n resp = requests.get(url)\n return resp.json()",
"def index(self):\n return self._data.get('index')"
] | [
"0.6285873",
"0.58124757",
"0.57936424",
"0.5732638",
"0.56942385",
"0.56683356",
"0.5626097",
"0.5600954",
"0.55877274",
"0.55677587",
"0.5518595",
"0.5501073",
"0.54690313",
"0.54614365",
"0.54614365",
"0.5457262",
"0.5432552",
"0.5432552",
"0.5432552",
"0.5432552",
"0.5432552",
"0.5432552",
"0.5432552",
"0.5432552",
"0.5432552",
"0.5432552",
"0.5432552",
"0.5420452",
"0.5418711",
"0.53924495"
] | 0.60662013 | 1 |
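The xappy indexer in the row above leans on zope.schema introspection: it walks every interface provided by the object and pulls out the text-valued fields. A stripped-down sketch of that loop, assuming zope.interface and zope.schema are installed; IDocument and Document are made-up stand-ins, and a plain dict replaces the xappy UnprocessedDocument:

```python
from zope import schema
from zope.interface import Interface, implementer, providedBy

class IDocument(Interface):
    title = schema.TextLine(title=u"Title")
    body = schema.Text(title=u"Body")

@implementer(IDocument)
class Document(object):
    def __init__(self, title, body):
        self.title = title
        self.body = body

def extract_text_fields(context):
    """Collect every Text/ASCII schema field provided by the context into a dict."""
    fields = {}
    for iface in providedBy(context):
        for name, field in schema.getFields(iface).items():
            if not isinstance(field, (schema.Text, schema.ASCII)):
                continue
            value = field.query(context)
            fields[name] = u"" if value is None else value
    return fields

print(extract_text_fields(Document(u"Hello", u"Some body text")))
# prints {'title': 'Hello', 'body': 'Some body text'} (key order may vary)
```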
Checks whether the provided URL is valid by scraping the page for a 403 error message; returns True if the error message is found (meaning it is not a valid page) and False otherwise | def check_url(url: str) -> bool:
try:
potential_error = driver.find_element_by_xpath("/html/body/div[5]/div/div/div[1]/div/div/div/section/div[2]/div").text
if '403' in potential_error:
return True
except:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def url_is_good(url):\n return website_re.match(url)\n # possible validation of reachability of website\n # http_response = requests.get(url)\n # return http_response < 400:",
"def is_valid_url(url: str) -> bool:\n try:\n requests.get(url)\n except requests.exceptions.RequestException:\n return False\n return True",
"def validate_url(url):\n try:\n url.encode('ascii')\n if url.startswith('mailto:'):\n raise ValueError\n result = scraper.get_actual_url(url)\n\n except ValueError:\n print(fg('blue') + '[' + str(datetime.now().time()) + ']' + attr(0),\n fg(1) + 'ERR' + attr(0), url, fg(1) + 'is invalid url' + attr(0))\n return False\n\n except UnicodeEncodeError:\n print(fg('blue') + '[' + str(datetime.now().time()) + ']' + attr(0),\n fg(1) + 'ERR' + attr(0), url, fg(1) + 'has bad characters' + attr(0))\n return False\n\n except TimeoutError:\n print(fg('blue') + '[' + str(datetime.now().time()) + ']' + attr(0),\n fg(1) + 'ERR' + attr(0), url, fg(1) + ':: Operation timed out' + attr(0))\n return False\n\n except ConnectionResetError:\n print(fg('blue') + '[' + str(datetime.now().time()) + ']' + attr(0),\n fg(1) + 'ERR' + attr(0), url, fg(1) + ':: Connection reset by peer' + attr(0))\n return False\n\n except requests.exceptions.HTTPError as err:\n print(fg('blue') + '[' + str(datetime.now().time()) + ']' + attr(0),\n fg(1) + 'ERR' + attr(0), url, fg(1) + str(err) + attr(0))\n return False\n\n except requests.exceptions.RequestException as err:\n print(fg('blue') + '[' + str(datetime.now().time()) + ']' + attr(0),\n fg(1) + 'ERR' + attr(0), url, fg(1) + str(err) + attr(0))\n return False\n\n except:\n return False\n\n else:\n if result:\n return True\n else:\n return False",
"def check_url_and_raise_errors(url: str) -> None:\n if not url:\n raise_error(\"Url can not be empty\", 400)\n\n try:\n URL_REGEX.match(url).span()[1] - URL_REGEX.match(url).span()[0] == len(url)\n except AttributeError:\n raise_error(\"Url should be valid\", 400)",
"def validate_url(url: str) -> None:\n if not is_valid_url(url):\n raise ValueError(f\"Validation Error. Provided url '{url}' is not valid.\")\n try:\n response = requests.get(url)\n except Exception as e:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")\n else:\n if response.status_code != status.HTTP_200_OK:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")",
"def is_valid(url):\n\n HAVERFORD_TOKEN = 'Haverford users only'\n INVALID_TOKENS = [HAVERFORD_TOKEN, \"Site Intel\", \"SITE Institute\"]\n content = urlopen(url).read()\n\n for token in INVALID_TOKENS:\n if token in content:\n return False\n return True",
"def url_exists(url):\n\n try:\n connection = urlopen(url)\n return connection.getcode() < 400\n except Exception as e:\n return False",
"def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! status_code: \" + r\n return(False)",
"def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']",
"def validate_url(url):\n response, content = get_response_from_file(url)\n\n if response == None and content == None:\n response, content = get_response_and_content(url)\n\n if response == None:\n return url, url, 0, \"\", \"N\", \"N\", \"N\", hit(\"No Response\"), \"false\"\n else:\n #print(url, get_visible_text(content))\n return evaluate_content_for_200s(response, url, content)",
"def check_url_invalidity(self) -> bool:\n validate = URLValidator()\n try:\n validate(self.args.url)\n return False\n except ValidationError:\n return True",
"def url_exists(url):\n # Check for URLs we can't validate\n if url.startswith(\"https://kiwiirc.com\"):\n return True\n if url.startswith(\"https://www.projectcalico.org\"):\n return True\n\n try:\n urllib2.urlopen(url)\n return True\n except urllib2.HTTPError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False\n except urllib2.URLError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False",
"def url_checker(url_str):\n file_msg = fd.Program_Msg(__file__)\n ## Checking input parameters\n if not (isinstance(url_str, str)):\n msg = '{0} `url_str` ({1}) is not a STRING!'.format(file_msg,\n type(url_str))\n raise LSSUtils_Error(msg)\n ##\n ## Checking Website\n request_url = requests.get(url_str)\n if (request_url.status_code != 200):\n msg = '{0} `url_str` ({1}) does not exist!'.format(file_msg, url_str)\n raise LSSUtils_Error(msg)",
"def is_accessible(url: str) -> bool:\n try:\n return requests.get(url).status_code == requests.codes.ok\n except Exception:\n return False",
"def check_url(url_link):\n res = requests.get(url_link, allow_redirects =True)\n if res.status_code == 200:\n print('valid URL \\n')\n return url_link\n else:\n print('Oupps there is something wrong with your URL. Run the program again!! ')\n return res.status_code",
"def check_if_exist(self,url):\r\n\t\t\"\"\" verefier si un lien existe \"\"\"\r\n\t\trequest = mechanize.Request(url)\r\n\t\tBAD_REQ = [400,401,404]\r\n\t\ttry :\r\n\t\t\tresponse = mechanize.urlopen(request)\r\n\t\t\tif response.code in BAD_REQ:\r\n\t\t\t\treturn False\r\n\t\t\telse:\r\n\t\t\t\treturn True\r\n\t\texcept urllib2.HTTPError, error:\r\n\t\t\tif error.code in BAD_REQ:\r\n\t\t\t\treturn False\r\n\t\t\telse:\r\n\t\t\t\treturn True",
"def _check_page(self, html_content):\n if \"Sign in for the best experience\" in html_content:\n valid_page = False\n elif \"The request could not be satisfied.\" in html_content:\n valid_page = False\n else:\n valid_page = True\n return valid_page",
"def check_url(url):\n # see also http://stackoverflow.com/questions/2924422\n good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]\n return get_server_status_code(url) in good_codes",
"def urlValidator(url):\n if 'amazon.com/' not in url:\n print('ERROR: Please enter a valid amazon.com URL. (ERROR 1)')\n else:\n validURL = url\n if 'Amazon.com/' not in url:\n print('ERROR: Please enter a valid amazon.com URL. (ERROR 2)')\n else:\n validURL = url\n\n return validURL",
"def is_error_url(self, url):\n self._load_error_urls()\n return url in self.errorurls",
"def valid(url):\n return 0 < len(urlparse(url)[1])",
"def check_url(url):\n good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]\n return get_server_status_code(url) in good_codes",
"def check_http_response(self, response, url):\n try:\n response.raise_for_status()\n success = True\n except (requests.HTTPError, ValueError):\n success = False\n excpt_msg = \"Invalid API response.\"\n try:\n excpt_msg = response.headers[\"cause-message\"]\n except:\n try:\n excpt_msg = response.json()[\"error\"][\"message\"][\"value\"]\n except:\n excpt_msg = \"Unknown error ('{0}'), check url in a web browser: '{1}'\".format(response.reason, url)\n api_error = EODataDownResponseException(excpt_msg, response)\n api_error.__cause__ = None\n raise api_error\n return success",
"def validate_url(url):\n url_verify = ''\n\n try:\n url_verify = urlopen(url)\n except HTTPError:\n get_user_response(message='Error validating URL: {}'.format(url))\n\n return url_verify",
"def check_link_is_valid(page_link):\n\tnew_page(page_link)\n\tif driver.title == 'Page not found · GitHub':\n\t\tprint('-> \t{} is not valid'.format(page_link))\n\telse:\n\t\tprint('-> \t{} is valid'.format(page_link))",
"def _error(self, url, soup, status, site, log_url=False):\n unexpect = False\n if status == 0:\n print('Unable to connect to website: ' + url)\n elif status >= 400 or soup is None:\n print(str(status) + ' | Can\\'t open website: ' + url)\n else:\n if site < 0:\n print('Unexpected website: ' + url)\n unexpect = True\n else:\n return False # No error\n if log_url and not unexpect:\n self._re_urls.append(url)\n return True # error",
"def isValidURL(self, url):\n if \"imdb.com\" in url:\n return True\n else:\n return False",
"def url_check(url):\n try:\n request = urllib.request.Request(url)\n request.get_method = lambda: 'HEAD'\n urllib.request.urlopen(request)\n return True\n \n except ValueError:\n return False\n\n except urllib.request.HTTPError:\n return False\n \n except URLError:\n return False",
"def isUrlValid(self, url):\n if url is None:\n return False\n elif url.startswith('//'):\n return False\n elif ':' in url:\n return False\n elif url.startswith('/wiki'):\n return True\n elif 'en.wikipedia.org/wiki/' not in url:\n return False\n return True",
"def is_ok(url: str) -> bool:\n try:\n resp = requests.get(url)\n except:\n return False\n return True if math.floor(resp.status_code / 100) == 2 else False"
] | [
"0.7925075",
"0.7531995",
"0.7530058",
"0.7316157",
"0.72739685",
"0.72567844",
"0.71402746",
"0.7119127",
"0.70845497",
"0.70514876",
"0.70416003",
"0.70365214",
"0.7012856",
"0.7010799",
"0.6940691",
"0.69313127",
"0.6929111",
"0.68956256",
"0.6881502",
"0.6879656",
"0.6854721",
"0.68488896",
"0.6798394",
"0.67751175",
"0.6747875",
"0.6741446",
"0.67359126",
"0.67130613",
"0.6697666",
"0.66568"
] | 0.86800694 | 0 |
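check_url above depends on a module-level Selenium driver and a hard-coded XPath. A sketch of the same check written against an explicit driver argument (Selenium 4 syntax; the default XPath reuses the one from the entry and the function name is illustrative). It needs a live WebDriver session to actually run:

```python
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By

ERROR_BANNER_XPATH = "/html/body/div[5]/div/div/div[1]/div/div/div/section/div[2]/div"

def page_returned_403(driver, xpath=ERROR_BANNER_XPATH):
    """True when the element at `xpath` exists and mentions a 403 error."""
    try:
        banner_text = driver.find_element(By.XPATH, xpath).text
    except NoSuchElementException:
        return False  # no error banner found -> treat the page as reachable
    return "403" in banner_text

# Usage, inside an existing scraping session:
# driver.get(url)
# if page_returned_403(driver):
#     print("Access forbidden:", url)
```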
Tries to click the given button, using try/except for error catching | def click_button(button_to_click):
try:
button_to_click.click()
except:
print("Button not found") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def atomacclick(objecttoclick):\n try:\n objecttoclick.Press()\n #print \"clicked on : %s\" %objecttoclick\n except Exception as er:\n print \"Not able to click on: %s\" %objecttoclick",
"def clickonbutton(titleobj, buttontoclick):\n try:\n ldtp.click(titleobj,buttontoclick)\n logging.info(\"Clicked on : %s\" % buttontoclick)\n except Exception as er:\n print (\"Not able to click on button\")",
"def _jsclick(self, locator):\n\n self.selenium.wait_until_page_contains_element(locator)\n self.selenium.wait_until_element_is_enabled(locator)\n for should_retry in (True, False):\n try:\n # Setting the focus first seems to be required as of Spring'20\n # (read: without it, tests started failing in that release). I\n # suspect it's because there is a focusOut handler on form\n # fields which need to be triggered for data to be accepted.\n element = self.selenium.get_webelement(locator)\n self.selenium.driver.execute_script(\n \"arguments[0].focus(); arguments[0].click()\", element\n )\n return\n except StaleElementReferenceException:\n if should_retry:\n time.sleep(1)\n else:\n raise",
"def click_button(self):\n self.q(css='div#fixture button').first.click()",
"def robust_click(driver, delay, selector):\n try:\n driver.find_element_by_xpath(selector).click()\n except Exception as e:\n print(\" The job post link was likely hidden,\\n An \" \\\n \"error was encountered while attempting to click link\" \\\n \"\\n {}\".format(e))\n attempts = 1\n clicked = False\n while not clicked:\n try:\n driver.find_element_by_xpath(selector).click()\n except Exception as e:\n pass\n else:\n clicked = True\n print(\" Successfully navigated to job post page \"\\\n \"after {} attempts\".format(attempts))\n finally:\n attempts += 1\n if attempts % 100 == 0:\n print(\"-------------- refreshing page\")\n driver.refresh()\n time.sleep(5)\n if attempts > 10**3:\n print(selector)\n print(\" robust_click method failed after too many attempts\")\n break",
"def click_button(self):\n self.widgets.get('button').click()",
"def click_button(self):\n self.q(css='div#fixture input').first.click()",
"def do_click(self, str_arg):\n arg = validateString(str_arg)\n for tmp in range(REPEAT_TIMES_ON_ERROR):\n try:\n if arg.startswith('('):\n point = self.__getPointXY(arg)\n printLog(self.threadName + '[clicking point %s...]' % arg, logging.DEBUG)\n self.adbc.touch(point[0], point[1], \"DOWN_AND_UP\")\n else:\n if \"/\" not in arg:\n raise ValueError('bad argument of do_click().')\n # get the target view\n tv = self.__getView(arg)\n if tv:\n if DEBUG:\n printLog('Found view %s.' % arg, logging.DEBUG)\n printLog(self.threadName + 'tinyStr: %s' % tv.__tinyStr__(), logging.DEBUG)\n # printLog(self.threadName + 'position and size: {}'.format(tv.getPositionAndSize()),\n # logging.DEBUG)\n printLog(self.threadName + '[clicking id %s...]' % arg, logging.DEBUG)\n tv.touch()\n else:\n printLog('Target view %s not found.' % arg, logging.ERROR)\n self.resultFlag = False\n return\n except Exception, e:\n printLog(self.threadName + 'the %dst try failed due to %s, will retry.' % (tmp, e.message),\n logging.ERROR)\n # self.reconnect()\n time.sleep(1)\n continue\n # finally:\n # printLog(self.threadName + \"[status=%s]\" % self.resultFlag)\n printLog(self.threadName + 'CLICK FAILED: still can\\'t make the click. please check the test environment.',\n logging.CRITICAL)\n self.resultFlag = False",
"def click(self) -> None:\n if self.is_enabled():\n try:\n self.element.click()\n logging.info(\"Class booked!\")\n except:\n logging.info(\"The button could not be clicked, trying to execute the element.\")\n self.driver.execute_script(\"arguments[0].click();\", self.element)\n finally:\n logging.info(\"Could not book the class\")\n\n else:\n warnings.warn('The Button cannot be clicked.')",
"def click(self, element=None):\n\t\tif element is not None:\n\t\t\ttry:\n\t\t\t\tassert(type(element)) == webdriver.firefox.webelement.FirefoxWebElement\n\t\t\t\telement.click()\n\t\t\t\treturn\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"Unable to click element\\n{}\".format(e))\n\t\t\t\treturn -1\n\t\ttry:\n\t\t\tself.actionObject().click()\n\t\texcept Exception as e:\n\t\t\tprint(f\"{bcolors.FAIL}[!!]Unable to click!{bcolors.ENDC}\\n\")\n\t\t\tprint(\"{}\".format(e))\n\t\t\treturn -1",
"def protectMoreDevices(button):\n try:\n atomacclick(button)\n except Exception as er:\n return False\n print \"Not able to click on protectMoreDevices button\"",
"def click(self, element):\n element.click()",
"def click(self):\r\n pass",
"def click_element(self, el_type, text, delay=3, handle_error=True):\n if el_type not in ['access', 'xpath']:\n LOGGER.error('Mentioned element does not exist!')\n button = None\n else:\n button = self.return_element(el_type=el_type, text=text)\n\n if handle_error:\n try:\n button.click()\n except NoSuchElementException:\n LOGGER.error('{ele} is not found: {err}'.format(ele=el_type, err=text))\n sys.exit(1)\n else:\n button.click()\n time.sleep(delay)",
"def click_error_icon(self):\n self.click_element(self.error_icon_locator)",
"def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()",
"def click(self):\n end_time = time.time() + self.parent.wait_time\n error = None\n while time.time() < end_time:\n try:\n return self._element.click()\n except (\n ElementClickInterceptedException,\n WebDriverException,\n ) as e:\n error = e\n\n raise error",
"def click_submit_button(self):\n self.click(by_locator=self.__ASK_QUESTION_PAGE_ASK_QUESTION_BUTTON)",
"def check_upgrade_button():\n try:\n upgrade = driver.find_element_by_link_text(\"Upgrade Now!\")\n upgrade.click()\n except:\n print \"Upgrade Now! button is not available.\"",
"def clickButton(self, xpath):\n WebDriverWait(self.driver, 20).until(\n EC.element_to_be_clickable((By.XPATH, xpath))).click()\n self.sleep_approx(1)",
"def wait_click_element(self, locator):\n try:\n return WebDriverWait(self.driver, 10).until(ec.element_to_be_clickable(locator))\n except AttributeError as e:\n loger.error('元素不可点击定位出错')\n self.save_screen_shot()\n raise e",
"def _ClickPrimaryActionButton(self):\n self._ExecuteOobeApi('Oobe.clickGaiaPrimaryButtonForTesting')",
"def press(button_id: str) -> None:\n try:\n self.query_one(f\"#{button_id}\", Button).press()\n except NoMatches:\n pass",
"def wait_until_element_is_clickable(self, element):\n try:\n self.wait.until(EC.element_to_be_clickable(element))\n except TimeoutException:\n raise NoSuchElementException(\"UI Element %s not found\" % element[1])\n except Exception as exce:\n raise exce",
"def click(self, browser, locator, sleep_time=3, expl_time=20):\n\n time.sleep(sleep_time)\n try:\n browser.implicitly_wait(5)\n WebDriverWait(browser, expl_time, ignored_exceptions=StaleElementReferenceException).until(\n ec.presence_of_element_located(locator))\n except (NoSuchElementException, TimeoutException, ElementNotInteractableException, StaleElementReferenceException):\n # additional check were deleted, cause of some unexpected timeout exceptions on it\n browser.implicitly_wait(5)\n WebDriverWait(browser, 10).until(ec.element_to_be_clickable(locator))\n self.waiting_loading_element(browser)\n browser.find_element(*locator).click()\n self.waiting_loading_element(browser)",
"def ticket_chooser(ticket):\n try:\n # Find the ticket type's button\n ticket_type = browser.find_element_by_xpath(ticket)\n ticket_type.click()\n\n except Exception as e:\n print(\"Ticket type not found\")",
"def elementClick(self,locator=\"\",locatorType='id',element=None):\n\n\n try:\n if locator:\n element=self.getElement(locator,locatorType)\n\n element.click()\n self.logger.info(\"clicked on element with locator\"+locator+\" locatorType: \"+locatorType)\n\n except:\n self.logger.info('Cannot click on element with locator '+locator+\" locatorType: \"+locatorType)\n print_stack()",
"def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)",
"def click_the_submit_button(self):\n with self._wait_for_page_refresh():\n self.selib.click_button(self.locator.submit_button)",
"def click_login_button(self):"
] | [
"0.6922278",
"0.6865346",
"0.6652217",
"0.66007996",
"0.65745544",
"0.6573612",
"0.649841",
"0.648705",
"0.6485279",
"0.6460392",
"0.63917434",
"0.6329999",
"0.6280454",
"0.6223838",
"0.6120003",
"0.607597",
"0.60168874",
"0.60052997",
"0.60030556",
"0.5995394",
"0.59888935",
"0.5963561",
"0.59454924",
"0.59449816",
"0.59442973",
"0.5925967",
"0.5917987",
"0.5911907",
"0.5911105",
"0.59093505"
] | 0.8195593 | 0 |
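click_button above swallows every exception with a bare except and a print. A slightly more explicit sketch of the same guard, catching only the click-related Selenium exceptions and reporting success (Selenium 4; the function name is illustrative):

```python
import logging

from selenium.common.exceptions import (
    ElementClickInterceptedException,
    ElementNotInteractableException,
    StaleElementReferenceException,
)

def safe_click(element):
    """Click a WebElement; return True on success, False on a known click failure."""
    try:
        element.click()
        return True
    except (ElementClickInterceptedException,
            ElementNotInteractableException,
            StaleElementReferenceException) as exc:
        logging.warning("Could not click element: %s", exc)
        return False

# Usage, inside an existing scraping session:
# if not safe_click(next_page_button):
#     ...retry, scroll the element into view, or fall back to a JS click...
```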
Scrapes the amenities from the web page for the given xpath. Returns a list | def find_amenities(xpath: str) -> list:
amenities = driver.find_elements_by_xpath(xpath)
return [amenitie.text for amenitie in amenities] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def collect_amenities(self, url: str) -> None:\n page_source = self.get_page_source(url, \"gmnoprint\")\n soup = BeautifulSoup(page_source, \"html.parser\")\n\n # Get latitude and longitude data\n self.get_coordinates(soup)\n\n # Open amenities url and collect additional data\n try:\n href_url_amenities = soup.find(class_=\"b6xigss dir dir-ltr\").find(\"a\")[\n \"href\"\n ]\n url_amenities = f\"https://www.airbnb.com{href_url_amenities}\"\n\n amenities_page_source = self.get_page_source(url_amenities, \"_vzrbjl\")\n soup = BeautifulSoup(amenities_page_source, \"html.parser\")\n amenities = soup.find_all(class_=\"_1cnse2m\")[1].get_text()\n\n except (AttributeError, TypeError, IndexError):\n amenities = \"\"\n\n if amenities == \"\":\n self.__collected_dic[\"kitchen\"].append(None)\n self.__collected_dic[\"refrigerator\"].append(None)\n self.__collected_dic[\"wifi\"].append(None)\n self.__collected_dic[\"washer\"].append(None)\n self.__collected_dic[\"tv\"].append(None)\n self.__collected_dic[\"parking\"].append(None)\n else:\n self.get_amenity_kitchen(amenities)\n self.get_amenity_refrigerator(amenities)\n self.get_amenity_wifi(amenities)\n self.get_amenity_washer(amenities)\n self.get_amenity_tv(amenities)\n self.get_amenity_parking(amenities)",
"def amenities(self):\n ats = storage.all(Amenity)\n ltats = []\n for objects in ats.values():\n if self.amenity_ids == objects.id:\n ltats.append(objects)\n return ltats",
"def amenities(self):\n list_amenities = []\n for amenity_obj in amenity_ids:\n if amenity_obj.id == self.id:\n list_amenities.append(amenity_obj)\n\n return list_amenities",
"def applicants_skills(driver):\n try:\n raw_skills = driver.find_elements_by_css_selector(\"span.pill\")\n skills = [skill.text for skill in raw_skills] \n return skills\n except Exception as e:\n print(\"error acquiring applicant skills\")\n print(e)\n return []",
"def do_extract(self, xpath):\n s = Selector(self.driver.page_source)\n for i, result in enumerate(s.xpath(xpath).getall(), 1):\n print(i, result)",
"def amenities(self):\n all_amenities = models.storage.all(Amenity)\n places = []\n for k, v in all_amenities.items():\n if v.id in self.amenity_ids:\n places.append(v)\n return places",
"def get_menu_items(self):\n items = []\n for number in range(1, 7):\n xpath = self.MENU_ITEM_XPATH.format(number)\n items.append(self.get_element((By.XPATH, xpath)))\n return items",
"def navigate_and_extract_avalanche_data(self):\n self.browser.get(self.url)\n avalanche_status = {}\n try:\n avalanche_level = self.browser.find_element_by_xpath(\n '//*[@id=\"law-master\"]/div[1]/div[1]/span/span')\n avalanche_status['avalanche_level'] = avalanche_level.text\n avalanche_warning_published = (\n self.browser.find_element_by_class_name('law-mst-iat'))\n avalanche_status['avalanche_warning_published'] = (\n avalanche_warning_published.text)\n avalanche_warning_valid_until = (\n self.browser.find_element_by_class_name('law-mst-exp'))\n avalanche_status['avalanche_warning_valid_until'] = (\n avalanche_warning_valid_until.text)\n avalanche_description = (\n self.browser.find_element_by_class_name(\"law-mst-dsc\"))\n avalanche_status['avalanche_description'] = (\n avalanche_description.text.replace('\\n', ' '))\n except NoSuchElementException as error:\n logging.info(f\"\"\"During scraping a website: {self.url} error has\n occured {error}\"\"\")\n return avalanche_status",
"def amenities(self):\n ''' for row in place_amenity: row.place_id and amenity.id\n == row.amenity_id:'''\n amenList = []\n for amenity in storage.all(Amenity).value():\n if self.id == amenity.place_id:\n amenList.append(amenity)\n return(amenList)",
"def parse_page(self, response):\n hrefs = response.xpath('//ul[@role=\"main\"]/a/@href').extract()\n\n category = response.xpath(\n '//ol[@data-css-rczytq=\"\"]/li[last()]/a/text()').extract_first()\n\n for href in hrefs:\n yield scrapy.Request(url=href, callback=self.parse_product, meta={\"category\": category})",
"def get_apartment_amenities(self, soup, apartment_dict):\n\n amenities_list_container = soup.find('div', class_='amenities')\n amenities_list = []\n for spantag in amenities_list_container.find_all('span', class_='amenity'):\n amenities_list.append(spantag.text.strip())\n apartment_dict['amenities'] = amenities_list",
"def scrape_categories():\n category_urls = []\n url = homepage + '/alle-categorieen/'\n response = rq.get(url, timeout=5)\n soup = BeautifulSoup(response.content, 'html.parser')\n main_soup = soup.find('main')\n\n for category in main_soup.find_all('a', {'href': re.compile(r'/overzicht/')}):\n category_urls.append(category['href'])\n\n return category_urls",
"def amenities_all():\n return jsonify(list(map(lambda x: x.to_dict(),\n list(storage.all(Amenity).values()))))",
"def find_elements(self, xpath:str):\n try:\n elements = self.driver.find_elements_by_xpath(xpath)\n \n except NoSuchElementException:\n elements = []\n \n return elements",
"def xmlextractor(url):\r\n\tListOfLinks = []\r\n\tresponse = urlopen(url)\r\n\troot = ET.fromstring(response.read())\r\n\tfor link in root.iter('{http://www.sitemaps.org/schemas/sitemap/0.9}loc'):\r\n\t\tListOfLinks.append(link.text)\r\n\treturn ListOfLinks",
"def elements(xpath_selection):\n driver = Driver().connect()\n return driver.find_elements_by_xpath(xpath_selection)",
"def parse_html(self, input: str, xpath: str) -> []:\n tree = html.fromstring(input)\n return tree.xpath(xpath)",
"def applicants_locations(driver):\n applicants_info = {}\n try:\n elem = driver.find_elements_by_css_selector(\"a.location-title\")\n for i in range(len(elem)):\n # city and applicants are separated by a new line\n city, applicants = elem[i].text.split('\\n')\n # get number of applicants by removing the word 'applicants'\n applicants = applicants[:applicants.find(\" applicants\")]\n # enter, typically, three applicant location data pairs\n location_data = {\n \"city\" : city, \n \"applicants\" : applicants\n }\n applicants_info[\"location\" + str(i + 1)] = location_data\n except Exception as e:\n print(\"error acquiring applicants locations\")\n print(e)\n return applicants_info",
"def amenities(self):\n G, mapping = self.network()\n waste = []\n resources = []\n intmed_products = []\n\n for nd in G:\n # if nd[0] != \"r\":\n if not isinstance(nd, int):\n if not G.in_edges(nd):\n resources.append(nd)\n elif not G.out_edges(nd):\n if nd != self.commodity:\n waste.append(nd)\n else:\n intmed_products.append(nd)\n\n return waste, resources, intmed_products",
"def find_gp_categories_links(html):\n links = []\n for m in re.finditer('href=\"(/store/apps/category/[^\"]+)\"', html):\n #print '%02d-%02d: %s' % (m.start(), m.end(), m.group(1))\n links.append(m.group(1))\n return links",
"def get_all_menu():",
"def xpath(element, xpath):\n for ns in namespaces:\n xpath_fmt = xpath.format(ns=\"\" if ns is None else \"adm:\")\n for found_element in element.xpath(xpath_fmt,\n namespaces=dict() if ns is None else dict(adm=ns)):\n yield found_element",
"def get_descendant_elements(self, xpath) -> list:\n tmp_xpath = self._chain_xpath(xpath)\n tmp_loc = (By.XPATH, tmp_xpath)\n return self._wait.until(EC.visibility_of_all_elements_located(tmp_loc))",
"def check_and_get_all_elements_by_xpath(element, xpath):\r\n if element is None or not xpath:\r\n return []\r\n try:\r\n return element.find_elements_by_xpath(xpath)\r\n except NoSuchElementException:\r\n return []",
"def get_a_elems_for_papers(tree):\n\n\tns = 'http://exslt.org/regular-expressions'\n\tpath = '//a[re:match(@href, \"http://eprints.gla.ac.uk/[0-9]+/\")]'\n\ta_elems = tree.xpath(path, namespaces={'re':ns})\n\treturn a_elems",
"def get_ids(self):\n page = r.get(self.url)\n tree = html.fromstring(page.content)\n ids_elements = tree.xpath(\"//div[@id='selectedcontent']/div/ul/li/a\")\n return [self._e_to_id(e) for e in ids_elements]",
"def iterate_on_items(pagecode):\n parser = etree.HTMLParser()\n \n tree = etree.parse(StringIO(pagecode), parser)\n\n # xpath = \"/html/body/div[3]/div[3]/div[3]/ul/li[83]/a/span/span[2]\"\n span_class = \"wb-itemlink-id\"\n request = tree.xpath('//span[@class=\"{}\"]'.format(span_class))\n for span in request:\n yield span.text",
"def get_elements_from_page(pagetree, css):\n\n # Have to convert the CSS selectors to XPATH selectors (gross).\n try:\n expression = GenericTranslator().css_to_xpath(css)\n except SelectorError:\n print('Invalid selector.')\n return\n elements = pagetree.xpath(expression)\n return elements",
"def get_item_links_in_page(self):\n css_selector = \"td.id > a\"\n id_links = self.driver.find_elements_by_css_selector(css_selector)\n return id_links",
"def amenities(self, amn):\n if type(amn) is Amenity:\n self.amenity_ids.append(str(amn.id))"
] | [
"0.6214858",
"0.6088792",
"0.59633666",
"0.57775974",
"0.5686356",
"0.5672884",
"0.56672996",
"0.56397474",
"0.56136924",
"0.5509259",
"0.5411635",
"0.54108214",
"0.53939843",
"0.5365997",
"0.5331163",
"0.53094465",
"0.530245",
"0.52577066",
"0.5229415",
"0.51995695",
"0.5191019",
"0.51872647",
"0.5183159",
"0.51828974",
"0.51731306",
"0.51460624",
"0.51365286",
"0.5119838",
"0.5115324",
"0.5090576"
] | 0.7940574 | 0 |
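find_amenities above maps .text over every element matched by an XPath using the module-level driver. The same idea with an explicit driver argument and blank strings filtered out (Selenium 4 syntax; the XPath in the usage comment is hypothetical):

```python
from selenium.webdriver.common.by import By

def collect_texts(driver, xpath):
    """Return the visible text of every element matching `xpath`, skipping blanks."""
    return [el.text for el in driver.find_elements(By.XPATH, xpath) if el.text.strip()]

# Usage, inside an existing scraping session:
# amenities = collect_texts(driver, '//div[@id="amenities"]//li')
```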
Returns the text of a given tag / class id within a Beautiful Soup object | def find_data_in_soup(soup, tag: str, class_id: str) -> str:
return soup.find(tag, class_=class_id).get_text() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def text_of(soup):\n return ''.join([str(x) for x in soup.findAll(text=True)])",
"def getHTMLTag(self, html, tag):\n soup = BeautifulSoup(html, 'html.parser')\n content = soup.find(tag)\n return content",
"def get_text_only(self, soup):\n val = soup.string\n # see if we have a text element\n if val is None:\n conts = soup.contents\n resulttext = ''\n # not text so continue recursing through the tags\n for tag in conts:\n subtext = self.get_text_only(tag)\n resulttext += subtext + '\\n'\n return resulttext\n return val.strip()",
"def get_text():\n global x\n for i in soup.body(\"aside\", {\"id\": \"text-2\"}):\n x = i.get_text()",
"def extract_element_data(soup, params):\r\n \r\n # 1. Find the right tag\r\n if 'class' in params:\r\n elements_found = soup.find_all(params['tag'], params['class'])\r\n else:\r\n elements_found = soup.find_all(params['tag'])\r\n \r\n # 2. Extract text from these tags\r\n if 'get' in params:\r\n element_texts = [el.get(params['get']) for el in elements_found]\r\n else:\r\n element_texts = [el.get_text() for el in elements_found]\r\n \r\n # 3. Select a particular text or concatenate all of them\r\n tag_order = params.get('order', 0)\r\n if tag_order == -1:\r\n output = '**__**'.join(element_texts)\r\n else:\r\n output = element_texts[tag_order]\r\n \r\n return output",
"def find_text_content_by_class(bs, tag, class_name):\n result = []\n for item in bs.find_all(tag, {\"class\":class_name}):\n item_text = strip_tags(str(item))\n result.append(\" \".join(item_text.split()))\n return result",
"def _get_text(raw_html):\n bs = BeautifulSoup(raw_html)\n text_nodes = bs.find_all(_is_text_tag)\n text_elements = [_get_child_text(node) for node in text_nodes]\n return ' '.join(chain(*chain(*text_elements)))",
"def extractText(postSoup):\n for tag in postSoup.findAll(True):\n if tag.name in (\"code\"):\n tag.extract()\n else:\n tag.hidden=True\n\n return postSoup.renderContents()",
"def get_text_hook(raw):\n soup = bs4.BeautifulSoup(quopri.decodestring(raw), features=\"lxml\")\n return soup.text",
"def text_from_html(soup):\n\n texts = soup.findAll(text=True)\n visible_texts = filter(tag_visible, texts)\n\n return u\" \".join(t.strip() for t in visible_texts)",
"def getTag(self, tag_to_find):\n tree = xml.parse(\"lesson\" + str(self.__id_lesson) + \".xml\")\n root = tree.getroot()\n for tag in root:\n if tag.tag == tag_to_find:\n return tag.text",
"def find_text (node, tag):\n rc = \"\"\n n = node.find (\".//%s\" % tag)\n if n is not None:\n rc = n.text\n return rc",
"def find_text_in_content(self, el):\n try:\n content_divs = [el.get_element_by_id(\"content\")]\n except KeyError:\n # try class\n content_divs = el.find_class(\"content\")\n\n if content_divs == []:\n return None\n \n # iterate over divs and extract text\n all = []\n for div in content_divs:\n r = self.find_text_in_p(div)\n all.append(r)\n return \" \".join(all)",
"def get_text(self, selector):\n el = self.locate_element(selector)\n return el.text",
"def extract_post_text(id, posts):\n try:\n post = posts.find(\"./*[@Id='{id}']\".format(id=id))\n return clean_up(post.attrib['Title'],False) + ' ' + clean_up(post.attrib['Body'],True)\n except AttributeError:\n return None\n except KeyError:\n return None",
"def get_text(self):\n text_element = self.page.find(id=self.text_location)\n return text_element.get_text()",
"def read_text_from_span_id(html, span_id):\n return html.find('span', {'id': span_id}).text",
"def get_text(element):\n if element.tag.split('}')[-1] == 'h3':\n return \"\\n\" # New section (double newline)\n return re.sub(\"\\s+\", \" \", ((element.text or '') + ''.join(map(get_text, element)) + (element.tail or '')))",
"def _text_of(self, elem):\n if isinstance(elem, Tag):\n text = [ ]\n for sub_elem in elem:\n text.append(self._text_of(sub_elem))\n\n return \" \".join(text)\n else:\n return elem.string",
"def extractContent(content):\n soup = BeautifulSoup(content, 'html.parser')\n return soup.get_text()",
"def get_text(self):",
"def get_inner_text(self, css_selector):\n element = self.driver.find_elements_by_css_selector(css_selector)\n if len(element) > 0:\n information = element[0].get_attribute(\"innerText\").strip()\n return information",
"def find_span(container: bs, tag: str, class_: str) -> Union[str, None]:\n\n found = container.find(tag, class_=class_)\n if found is None:\n return None\n return found.text.strip()",
"def extract_text(td):\n text = td.find('a',href=True).text\n return text",
"def __getTagText(self, tag):\n return ''.join(tag.findAll(text=True)).replace(unichr(160), ' ')",
"def get_certain_joke(html):\n soup = BeautifulSoup(html, 'lxml')\n\n # for i in len(soup.select('div.content')):\n # print(soup.select('div.content')[i].get_text())\n\n joke_content = soup.select('div.content')[0].get_text()\n\n return joke_content",
"def _get_element_text(self, selector):\r\n text_list = self._find_within(selector).text\r\n return text_list[0] if text_list else None",
"def extractText(html_code):\n html_tree = html.fromstring(html_code)\n chapter_list = html_tree.find_class(\"chapter\")\n chapter_text = chapter_list[0].text_content()\n return chapter_text",
"def nameGet(soup):\n name = soup.find('span', id='title', class_='a-size-small')\n name = name.text\n name = name.strip()\n\n return name",
"def text(element):\n text = element.text\n return text if text is not None else \"\""
] | [
"0.65025955",
"0.64949864",
"0.64455515",
"0.6300595",
"0.624028",
"0.61896014",
"0.61368173",
"0.6121002",
"0.60955775",
"0.6071008",
"0.6028691",
"0.59635437",
"0.5934924",
"0.5929046",
"0.5905635",
"0.5897759",
"0.5881083",
"0.5818709",
"0.581585",
"0.580303",
"0.57516354",
"0.5728967",
"0.57126915",
"0.5703741",
"0.5702625",
"0.56957155",
"0.5667901",
"0.5656796",
"0.56444365",
"0.5628584"
] | 0.8107418 | 0 |
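find_data_in_soup above calls soup.find(...).get_text() directly, which raises AttributeError when the tag is missing. A defensive variant of the same lookup, runnable as-is with beautifulsoup4 installed (text_of_first is an illustrative name, not the original function):

```python
from bs4 import BeautifulSoup

def text_of_first(soup, tag, class_id, default=""):
    """Text of the first `tag` with CSS class `class_id`, or `default` if absent."""
    node = soup.find(tag, class_=class_id)
    return node.get_text(strip=True) if node is not None else default

html = '<div><span class="price">42 EUR</span><span class="title">Loft</span></div>'
soup = BeautifulSoup(html, "html.parser")
print(text_of_first(soup, "span", "price"))    # -> 42 EUR
print(text_of_first(soup, "span", "missing"))  # -> (empty string)
```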
Create the large attribute dictionary. | def create_data_set(num_attributes):
data_set = {}
for index in range(num_attributes):
size = random.randint(1, 10) # nosec
key = str(index).encode("utf-8")
data_set[key] = get_random_bytes(size)
return data_set | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _init_attributes(self):\n self.attr = {\n 'name': None,\n 'tags': [],\n 'openHours': None,\n 'type': None,\n 'parent': None,\n 'locationId': None,\n 'bannerAbbreviation': None,\n 'arcGisAbbreviation': None,\n 'geoLocation': None,\n 'geometry': None,\n 'summary': None,\n 'description': None,\n 'descriptionHtml': None,\n 'address': None,\n 'city': None,\n 'state': None,\n 'zip': None,\n 'county': None,\n 'telephone': None,\n 'fax': None,\n 'thumbnails': [],\n 'images': [],\n 'departments': [],\n 'website': None,\n 'sqft': None,\n 'calendar': None,\n 'campus': None,\n 'girCount': None,\n 'girLimit': False,\n 'girLocations': None,\n 'synonyms': [],\n 'bldgId': None,\n 'parkingZoneGroup': None,\n 'propId': None,\n 'adaParkingSpaceCount': None,\n 'motorcycleParkingSpaceCount': None,\n 'evParkingSpaceCount': None,\n 'weeklyMenu': None,\n 'notes': None,\n 'labels': {},\n 'steward': None,\n 'shape': {}\n }",
"def _build_attributes(self):\n\n # We might rebuild the program because of snippets but we must\n # keep already bound attributes\n\n dtype = []\n for (name,gtype) in self.all_attributes:\n if name not in self._attributes.keys():\n attribute = Attribute(self, name, gtype)\n else:\n attribute = self._attributes[name]\n\n self._attributes[name] = attribute\n dtype.append(attribute.dtype)",
"def attributes_metadata(self):\n\n attribute_meta = collections.defaultdict(dict)\n\n for attribute in self.attributes:\n attribute_meta[attribute.name]['valuemap'] = attribute.valuemap\n attribute_meta[attribute.name]['qualifiers'] = attribute.qualifiers\n\n return dict(attribute_meta)",
"def get_attributes(self):\n \n retdict = {}\n retdict['s'] = str(self.s)\n if self.t != None:\n retdict['t'] = str(self.t)\n retdict['a'] = str(self.a)\n retdict['b'] = str(self.b)\n retdict['c'] = str(self.c)\n retdict['d'] = str(self.d)\n return retdict",
"def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self",
"def attrdict_constructor(loader, node):\n return AttrDict(loader.construct_mapping(node))",
"def load_attributes():\n\n # <attribute_id> <attribute_name>\n attributes_file = open(PROJECT_ROOT +'/data/attributes.txt').readlines()\n attributes_file = [i.strip().split() for i in attributes_file]\n\n # <certainty_id> <certainty_name>\n certainties_file = open(PROJECT_ROOT +'/data/CUB_200_2011/attributes/certainties.txt').readlines()\n certainties_file = [i.strip().split() for i in certainties_file]\n\n # <image_id> <attribute_id> <is_present> <certainty_id> <time>\n labels_file = open(PROJECT_ROOT +'/data/CUB_200_2011/attributes/image_attribute_labels.txt').readlines()\n labels_file = [i.strip().split() for i in labels_file]\n\n attribute_ids = {}\n for i in attributes_file:\n attribute_ids[i[1]] = int(i[0])\n\n certainty_ids = {}\n for i in certainties_file:\n certainty_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels_file:\n label_ids[(int(i[0]), int(i[1]))] = list(map(lambda x:int(float(x)), i[2:]))\n\n return attribute_ids, certainty_ids, labels_file, label_ids",
"def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic",
"def __attrs_post_init__(self):\n self.key = uuid.uuid4().hex\n if self.properties is None:\n self.properties = {}\n if self.is_image:\n try:\n img_size = Image.open(self.open()).size\n self.properties.update(width=img_size[0], height=img_size[1])\n except IOError:\n self.content_type = 'application/octet-stream'",
"def _build_primitive(self, attributes, indices, material, mode):\n new_primitive = {}\n\n properties_key = [\"attributes\", \"indices\", \"material\", \"mode\"]\n properties_val = [attributes, indices, material, mode]\n for key, val in zip(properties_key, properties_val):\n if val is not None:\n new_primitive[key] = self._resolve_mapping(inp=val, mapping=self.accessors_map)\n\n return new_primitive",
"def to_attr(self, session):\n # \"object_dict\" and \"tree\" uses LargeBinary sqlalchemy datatype.\n # LargeBinary sqlalchemy datatype needs \"bytes-like\" object\n # and in Py2, string are bytes-like objects while in Py3 they aren't.\n # So we need to store \"bytes-like\" object in DB objects.\n # For getting the resource attr dict from model object\n # we need to decode to native string (for Py3 compatibility).\n # Since in Py2, string are bytes-like objects, decoding won't\n # make a difference.\n attr_dict = {}\n for k in dir(self):\n if (not k.startswith('_') and\n k not in getattr(self, '_exclude_to', []) and\n not callable(getattr(self, k))):\n if k == 'object_dict':\n v = self.get_attr(session, k)\n if isinstance(v, bytes):\n attr_dict[k] = v.decode('utf-8')\n else:\n attr_dict[k] = v\n elif k == 'tree':\n v = self.get_attr(session, k)\n if isinstance(v, bytes):\n attr_dict[k] = v.decode('utf-8')\n else:\n attr_dict[k] = v\n else:\n attr_dict[k] = self.get_attr(session, k)\n return attr_dict",
"def build_attributes(\n cls,\n attributes: Dict[str, Any],\n namespace: ConfigNamespace\n ) -> Dict[str, Any]:\n config_path = attributes.get('config_path')\n tokens = {}\n\n def build_config_key(value_def: ValueTypeDefinition, config_key: str) -> str:\n key = value_def.config_key or config_key\n return f\"{config_path}.{key}\" if config_path else key\n\n def build_token(\n name: str,\n value_def: ValueTypeDefinition\n ) -> Tuple[str, property]:\n config_key = build_config_key(value_def, name)\n value_token = ValueToken.from_definition(\n value_def, namespace, config_key)\n getters.register_value_proxy(namespace, value_token, value_def.help)\n tokens[name] = value_token\n return name, build_property(value_token)\n\n def build_attr(name: str, attribute: Any) -> Tuple[str, property]:\n if not isinstance(attribute, ValueTypeDefinition):\n return name, attribute\n return build_token(name, attribute)\n\n attributes = dict(build_attr(*item)\n for item in attributes.items())\n attributes['_tokens'] = tokens\n return attributes",
"def to_dict(self):\n d = {}\n i = 0\n for entry in self.entries:\n d[i] = {}\n attributes = self.get_attribute_list()\n print (attributes)\n for data in attributes:\n d[i][data] = entry.__getattribute__(data)\n i = i + 1\n return d",
"def _save_live_attrs(lock_detail):\n return {attr: getattr(lock_detail, attr) for attr in API_CACHED_ATTRS}",
"def to_dict(\n self,\n attributes: Iterable[str] = (\"xyz\", \"viewdir\", \"imgsz\", \"f\", \"c\", \"k\", \"p\"),\n ) -> Dict[str, tuple]:\n return {key: helpers.numpy_to_native(getattr(self, key)) for key in attributes}",
"def get_attributes(self) -> Dict[str, str]:\n pass",
"def createDictionary(self):\n\t\tdictionary: dict = {}\n\t\tdictionary.update({'deckname': self.mDeckName})\n\t\tdictionary.update({'filename': self.autoFilename})\n\t\tdictionary.update({'creatorname': str(self.mCreatorname)})\n\t\tdictionary.update({'maxAttrPoints': str(self.mMaxAttributePoints)})\n\t\tminionListDict: dict = {}\n\t\tfor minion in self.mMinionSet:\n\t\t\tminionDict: dict = {}\n\t\t\tminionDict.update({'minionName': str(minion.mMinionName)})\n\t\t\tminionDict.update({'attack': str(minion.mAttackPoints)})\n\t\t\tminionDict.update({'hp': str(minion.mHealthPoints)})\n\t\t\tskillList: list = minion.mSkills\n\t\t\tskillNames: list = []\n\t\t\tfor skill in skillList:\n\t\t\t\tskillNames.append(skill.mSkillName)\n\t\t\tminionDict.update({'skills': skillNames})\n\t\t\tminionListDict.update({minion.mMinionName: minionDict})\n\t\tdictionary.update({'minions': minionListDict})\n\t\tdictionary.update({'id' : hash(str(dictionary))}) # TODO LPO: let DB handle that\n\t\tself.mDeckDict = dictionary\n\t\treturn dictionary",
"def _write_attrs(self, title):\n # XXX: Should probably all be defined in some header file.\n self._f.attrs['api_version'] = np.float32([6.30000019])\n self._f.attrs['version'] = np.float32([6.30000019])\n self._f.attrs['floating_point_word_size'] = \\\n np.array([self.__f_word_size], dtype=np.int32)\n self._f.attrs['file_size'] = np.array([1], dtype=np.int32)\n self._f.attrs['maximum_name_length'] = np.array([32],\n dtype=np.int32)\n self._f.attrs['int64_status'] = np.array([0], dtype=np.int32)\n self._f.attrs['title'] = np.string_(title)",
"def dict_with_attrs2(*args):\n class CustomDict(object):\n __slots__ = args\n __dict__ = {}\n\n def __init__(self, *args, **kwargs):\n super(CustomDict, self).__init__()\n if args:\n self.__dict__.update(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __getattr__(self, name):\n return self.__dict__[name]\n\n return CustomDict",
"def _init_attributes(self):\n if os.name == \"nt\":\n if \"64\" in platform.architecture()[0]:\n platform_arch = \"x86_64\"\n elif \"32\" in platform.architecture()[0]:\n platform_arch = \"i386\"\n else:\n platform_arch = platform.architecture()\n os_ver = f\"Windows-{platform.win32_ver()[1]}\"\n else:\n platform_arch = platform.machine()\n if platform.system() == \"Darwin\":\n os_ver = f\"macOS-{platform.mac_ver()[0]}\"\n else:\n os_ver = \"-\".join(linux_distribution()[0:2])\n\n license_chunks = LICENSE.split(\" \")\n if license_chunks[0] == \"GPLv2\":\n client_license = \"GPL-2.0\"\n else:\n client_license = \"Commercial\"\n\n default_attributes = {\n # Process id\n \"_pid\": str(os.getpid()),\n # Platform architecture\n \"_platform\": platform_arch,\n # OS version\n \"_os\": os_ver,\n # Hostname of the local machine\n \"_source_host\": socket.gethostname(),\n # Client's name\n \"_client_name\": \"mysql-connector-python\",\n # Client's version\n \"_client_version\": \".\".join([str(x) for x in VERSION[0:3]]),\n # Client's License identifier\n \"_client_license\": client_license,\n }\n self._settings[\"attributes\"].update(default_attributes)\n\n if \"connection-attributes\" in self._settings:\n for attr_name in self._settings[\"connection-attributes\"]:\n attr_value = self._settings[\"connection-attributes\"][attr_name]\n # Validate name type\n if not isinstance(attr_name, str):\n raise InterfaceError(\n f\"Attribute name '{attr_name}' must be a string type\"\n )\n # Validate attribute name limit 32 characters\n if len(attr_name) > 32:\n raise InterfaceError(\n f\"Attribute name '{attr_name}' exceeds 32 characters \"\n \"limit size\"\n )\n # Validate names in connection-attributes cannot start with \"_\"\n if attr_name.startswith(\"_\"):\n raise InterfaceError(\n \"Key names in 'session-connect-attributes' cannot \"\n f\"start with '_', found: {attr_name}\"\n )\n # Validate value type\n if not isinstance(attr_value, str):\n raise InterfaceError(\n f\"Attribute name '{attr_name}' value '{attr_value}' \"\n \" must be a string type\"\n )\n\n # Validate attribute value limit 1024 characters\n if len(attr_value) > 1024:\n raise InterfaceError(\n f\"Attribute name '{attr_name}' value: '{attr_value}' \"\n \"exceeds 1024 characters limit size\"\n )\n\n self._settings[\"attributes\"][attr_name] = attr_value",
"def add_attributes(self, attributes):\n self.attributes = dict(self.attributes, **attributes)",
"def dict_with_attrs(*args):\n class CustomDict(dict):\n __slots__ = args\n\n def __init__(self, *args, **kwargs):\n super(CustomDict, self).__init__(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n return CustomDict",
"def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):\n attrs = dict(base_attrs, **kwargs)\n if extra_attrs:\n attrs.update(extra_attrs)\n return attrs",
"def uncleanable():\n data = attrdict.AttrDict()\n data.backup_ids = set()\n data.image_ids = set()\n data.keypair_ids = set()\n data.server_ids = set()\n data.nodes_ids = set()\n data.chassis_ids = set()\n data.snapshot_ids = set()\n data.transfer_ids = set()\n data.volume_ids = set()\n return data",
"def _set_attributes(self):",
"def _attribs(self, name=None, description=None):\n a = {}\n if name:\n a['name'] = name\n if description:\n a['description'] = description\n return a",
"def dict(self) -> dict():\n\n dict_reg_hive = {}\n\n for _attribute in self.attributes.__dict__.items():\n if isinstance(_attribute[1], str):\n if not True in [_attribute[1].startswith(prefix) for prefix in ['<', 'providers.', 'None']]:\n _attribute_value = getattr(self, _attribute[1])\n dict_reg_hive.update({_attribute[1]: _attribute_value})\n\n return dict_reg_hive",
"def to_dictionary(self):\n attributes = [\"id\", \"size\", \"x\", \"y\"]\n return {key: getattr(self, key) for key in attributes}",
"def attribute_dict(self):\n return self.__attribute_dict",
"def __init__(self, **attributes):\n for key, value in attributes.items():\n setattr(self, key, value)"
] | [
"0.6525995",
"0.64443743",
"0.6089407",
"0.60877615",
"0.60155773",
"0.5991732",
"0.58783555",
"0.58489907",
"0.58311176",
"0.5822948",
"0.5812633",
"0.5803778",
"0.57970864",
"0.578953",
"0.57819146",
"0.5750089",
"0.57281816",
"0.57055736",
"0.57011706",
"0.5686207",
"0.56835806",
"0.5679717",
"0.5675287",
"0.5673228",
"0.5668872",
"0.5666856",
"0.56376356",
"0.5636111",
"0.5635911",
"0.5606692"
] | 0.65942544 | 0 |
verify the Attributes value after get_attr | def verify_get_attr(self, indata, outdata):
decoded = {}
for key, val in outdata.items():
if isinstance(val, bytes):
decoded[key.decode()] = val
else:
decoded[key] = base64.b64decode(val)
self.log.info("Verifying get_attr output:")
self.log.info(" get_attr data: %s", indata)
self.log.info(" set_attr data: %s", decoded)
for attr, value in indata.items():
if value != decoded.get(attr.decode(), None):
self.fail(
"FAIL: Value does not match after get({}), Expected "
"val={} and received val={}".format(attr, value,
decoded.get(attr.decode(), None))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_attributes(self):\n pass",
"def test_get_attribute_data(self):\n pass",
"def GetValidAttributeValues(self, attr):\n return None",
"def verify_get_attr(self, indata, outdata):\n decoded = {}\n for key, val in outdata.items():\n if isinstance(val, bytes):\n # The API returns the values as bytes already.\n decoded[key.decode()] = val\n else:\n # The JSON output encodes the bytes as base64, so\n # we need to decode them for comparison.\n decoded[key] = base64.b64decode(val)\n\n self.log.info(\"Verifying get_attr output:\")\n self.log.info(\" get_attr data: %s\", indata)\n self.log.info(\" set_attr data: %s\", decoded)\n\n for attr, value in indata.items():\n if value != decoded.get(attr.decode(), None):\n self.fail(\n \"FAIL: Value does not match after get({}), Expected \"\n \"val={} and received val={}\".format(attr, value,\n decoded.get(attr.decode(), None)))",
"def check_attributes(self):\n self.assertEqual(type(self.amenity_1.name), str)",
"def validate_attribute(self, attr):\n self.attrs.validate_attribute(attr)",
"def test_attr(self):\n self.assertTrue(hasattr(self.amenity, \"created_at\"))\n self.assertTrue(hasattr(self.amenity, \"id\"))\n self.assertTrue(hasattr(self.amenity, \"updated_at\"))\n self.assertFalse(hasattr(self.amenity, \"random_attr\"))\n self.assertTrue(hasattr(self.amenity, \"name\"))\n self.assertEqual(self.amenity.__class__.__name__, \"Amenity\")\n self.assertEqual(self.amenity.name, \"\")",
"def test_attributes(self):\n attributes = storage.attributes()[\"Review\"]\n b = Review()\n for k, v in attributes.items():\n self.assertTrue(hasattr(b, k))\n self.assertEqual(type(getattr(b, k, None)), v)",
"def validate_attribute(self, attr):\n self.validate(attr)",
"def validate(attrs):\n print \"I GOT HERE.\"\n try:\n #required_attributes = ('qquuid', 'qqfilename')\n #[attrs.get(k) for k,v in attrs.items()]\n return True\n except Exception, e:\n return False",
"def is_valid_attribute(self, attr):\n return self.is_valid(attr)",
"def testattributes(self):\n for attr in AmuletAbility.attributes:\n a = AmuletAbility('Attribute', attr=attr)\n self.assert_(attr in str(a))\n self.assertEqual(a.attribute, attr)\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))",
"def test_adding_attributes(self):\n self.assertEqual(self.compound.get_attribute(\"What\"), \"Everything\")",
"def has_attribute(self, name):\n\n pass",
"def attr(self, name):\r\n return Assert(getattr(self.obj, name))",
"def UseAttribute(self) -> bool:",
"def has_attributes(self):\n\n pass",
"def test_one_att(self):\n self.test_attribute.is_down = mock.Mock(return_value=False)\n self.run_mock_analyzer([self.test_attribute, ])\n self.assert_mock_analyzer(self.test_attribute)",
"def test_attr_cls(self):\n self.assertEqual(hasattr(self.review, \"text\"), True)\n self.assertEqual(hasattr(self.review, \"place_id\"), True)\n self.assertEqual(hasattr(self.review, \"user_id\"), True)",
"def validate_attributes(self, attrs):\n self.attrs.validate(attrs)",
"def validate(attrs):\n try:\n #required_attributes = ('qquuid', 'qqfilename')\n #[attrs.get(k) for k,v in attrs.items()]\n return True\n except Exception, e:\n return False",
"def about_attribute(self, name):\n for cdef in self.getmro():\n if name in cdef.attrs:\n s_result = cdef.attrs[name].s_value\n if s_result != s_ImpossibleValue:\n return s_result\n else:\n return None\n return None",
"def test_attributes(self):\n comp = str(self.test1)\n attr = ['BaseModel', 'id', 'created_at', 'updated_at']\n counter = 0\n for a in attr:\n if a in attr:\n counter += 1\n self.assertTrue(counter == 4)",
"def get_attribute(self, attr):\n logger.debug(\"GET ATTRIBUTE {}\".format(attr))",
"def test_attributes(self):\n self.assertTrue(hasattr(self.city, 'name'))\n self.assertTrue(hasattr(self.city, 'state_id'))",
"def assert_valid_attribute(self, name):\n if name.startswith('_'):\n return\n self.assert_known_field(name)",
"def test_attr(self):\n new_review = Review()\n self.assertTrue(hasattr(new_review, \"id\"))\n self.assertTrue(hasattr(new_review, \"created_at\"))\n self.assertTrue(hasattr(new_review, \"updated_at\"))\n self.assertTrue(hasattr(new_review, \"place_id\"))\n self.assertTrue(hasattr(new_review, \"user_id\"))\n self.assertTrue(hasattr(new_review, \"text\"))",
"def has_attr(self, key):\n return key in self.attrs",
"def getter_attributes_test(name, from_xml, from_dict, result):\n assert getattr(from_xml, name) == result\n assert getattr(from_dict, name) == result",
"def test_attribute(self):\n xp = XPathQuery(\"/foo[@attrib1]\")\n self.assertEqual(xp.matches(self.e), True)"
] | [
"0.71446437",
"0.70372206",
"0.696676",
"0.6889947",
"0.68315357",
"0.6809947",
"0.67375857",
"0.6713587",
"0.66863",
"0.6556088",
"0.65475464",
"0.65393233",
"0.6483959",
"0.6472178",
"0.64524734",
"0.64095545",
"0.6385835",
"0.63662297",
"0.63074434",
"0.6280292",
"0.6267609",
"0.6258932",
"0.6253027",
"0.6250221",
"0.62496465",
"0.6248021",
"0.6246069",
"0.6238079",
"0.6235272",
"0.623032"
] | 0.7044167 | 1 |
check for command result, raise failure when error encountered | def check_result(self, result):
self.log.info("--check_result, result= %s", result)
if result[0]['exit_status'] != 0:
self.fail("##Error detected from check_result")
else:
self.log.info("--check_result passed") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error_check(command):\r\n\r\n # TODO\r",
"def __try_command(cmd, description):\n try:\n out = subprocess.check_output(cmd, stderr=subprocess.STDOUT);\n return (True, out.decode(\"utf-8\")) # success\n except subprocess.CalledProcessError as e:\n print(\"Error while {:s}, return code is non-zero ({:d})\".format(description, e.returncode))\n print(\"Command: {:s}\".format(\" \".join(e.cmd)))\n if e.output:\n print(\"Output: {:s}\".format(e.output.decode(\"utf-8\").strip()))\n\n return (False, None) # error",
"def _runCommandRaiseIfFail (self, command, killTimeout = DEAFULT_KILL_TIMEOUT, warningTimeout = DEAFULT_WARNING_TIMEOUT, shell=False):\n (rc,outText,errText) = self._runCommand(command, killTimeout = killTimeout, warningTimeout = warningTimeout, shell = shell)\n if rc != 0:\n self._log(\"run-command-raising\").warning(\"Command returned '%s', raising exception\", rc)\n raise SdUtilsError(\"Failed running command %s\" % command)\n return (outText,errText)",
"def execute_failure(self, *args, **kwargs):\n return 1, \"\", None",
"def cmd_error_check(self, cmd_out):\n for err in self.err_strings:\n if re.search('\\\\b%s\\\\b' % (err), cmd_out, re.I):\n _log.info(cmd_out)\n _log.info(\n \"Cmd execution failed! with this Return Error: \\n%s\" % (\n cmd_out))\n return 0",
"def test_run_and_check_result(self):\n # Run a successful command.\n result = build_cmake_project.run_and_check_result('echo hello world')\n self.assertTrue(result)\n\n # Run a failure command.\n try:\n result = build_cmake_project.run_and_check_result('unexistent --command')\n except subprocess.CalledProcessError:\n self.fail('Exception thrown when running unexistent command.')\n self.assertFalse(result)",
"def evaluateCommand(self, cmd):\n if cmd.rc != 0:\n return FAILURE\n if self.errors:\n\t if halt_on_lintian_error:\n\t return FAILURE\n\t else:\n\t\treturn WARNINGS\n if self.warnings:\n return WARNINGS\n return SUCCESS",
"def _checkCommandStatus(self, lastCommand=False):\n p = self.spawnProc\n p.sendline('echo $?')\n regex = re.compile('^[0-9]+',re.M)\n p.expect(regex, 2)\n msg = '_checkCommandStatus : Execution of command FAILED'\n \tif lastCommand:\n \t msg = '_checkCommandStatus :Execution of command : \"%s\" FAILED' %lastCommand\n if p.after != '0' and p.after != '99':\n raise AssertionError(msg)",
"def run_or_die(command):\n (status, stdio) = commands.getstatusoutput(command)\n if status != 0:\n raise Exception(\"command '%s' failed with exit status %d and output '%s'\" % (command, status, stdio))\n return stdio",
"def _error_check(self, command_response):\n error_list = command_response.find(\"./clierror\")\n command_obj = command_response.find(\"./input\")\n if error_list is not None:\n command = command_obj.text if command_obj is not None else \"Unknown command\"\n msg = etree.tostring(error_list).decode()\n raise NXAPICommandError(command, msg)",
"def execute(arg):\n print('Invalid command!!!')\n return",
"def isfailure(self):\n\n return self.proc.returncode != 0",
"def test_check_if_not_error(self):\n actual_result = SshpassErrorExitCodeController(OK_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)",
"def evaluateCommand(self, cmd):\n\n if cmd.rc == 127:\n return WARNINGS\n elif cmd.rc != 0:\n if halt_on_unittest_error:\n return FAILURE\n else:\n return WARNINGS\n\n if self.warnings or self.errors:\n return WARNINGS\n return SUCCESS",
"def run_check_errors(cmd):\n if type(cmd) == str:\n cmd = cmd.split()\n output = subprocess.run(cmd, capture_output=True, text=True)\n if output.stderr != \"\":\n print_cmd = \" \".join(map(str, cmd))\n sys.exit(\n f\"The error {output.stderr} was generated when running {print_cmd}. Exiting.\"\n )\n return",
"def cmd_result(is_success, cmd, output, error):\n\n\t\tself.__logs.append(output)",
"def check_output(command):\n process = Popen(command, shell=True, stdout=PIPE)\n output, err = process.communicate()\n if process.returncode == 0: # success\n return output\n else:\n raise RuntimeError(\"Command {0} running unsuccessfully\".format(command))",
"def test_execute_or_bail_ok(self):\n with self.assertLogs(level=\"INFO\") as cm:\n with etl.commands.execute_or_bail(\"unittest\"):\n pass\n self.assertEqual(len(cm.output), 1)\n self.assertTrue(\"finished successfully\" in cm.output[0])",
"def test_check_if_not_error(self):\n actual_result = SshErrorExitCodeController(ERROR_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)",
"def _check_command(self, resp, prompt):\n for line in resp.split(NEWLINE):\n if line.startswith('?'):\n raise InstrumentProtocolException('error processing command (%r)', resp[1:])\n if line.startswith('*'): # response\n if not valid_response(line):\n raise InstrumentProtocolException('checksum failed (%r)', line)",
"def test_check_if_not_error(self):\n actual_result = SshErrorExitCodeController(OK_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)",
"def run_cmd(cls, cmd):\n cmd_rc = False\n cmd_result = b'' # Stores bytes\n\n if cmd == \"\" or cmd is None:\n cmd_rc = False\n else:\n # shell=True means cmd param contains a regular cmd string\n shell = shl.Popen(cmd, shell=True,\n stdin=shl.PIPE, stdout=shl.PIPE, stderr=shl.STDOUT)\n cmd_result, _ = shell.communicate()\n if 'failure'.encode('utf-8') in cmd_result or 'fatal'.encode('utf-8') in cmd_result:\n cmd_rc = False\n else:\n cmd_rc = True\n return (cmd_rc, cmd_result)",
"def _execute(self, cmd):\r\n stdout, stderr, return_code = self._remote_client.run_remote_cmd(cmd)\r\n if return_code:\r\n raise exceptions.ArgusError(\r\n \"Command {command!r} failed with \"\r\n \"return code {return_code!r}\"\r\n .format(command=cmd,\r\n return_code=return_code))\r\n return stdout, stderr",
"def expects_result(self, command):\n return isinstance(command, (self.package(\"Syntax\").Operator,\n self.package(\"Syntax\").Formule))",
"def test_check_if_not_error(self):\n actual_result = ExecutionExitCodeController(OK_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)",
"def failure(self, result):\r\n raise NotImplementedError",
"def execute_success(self, *args, **kwargs):\n return 0, self.shell_output, None",
"def _ssh_quiet(self, cmd, allow_fail=False):\n try:\n if self.logger is not None:\n self.logger.debug(\"Trying to run remote command: \" + cmd)\n subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n return 0\n except subprocess.CalledProcessError as e:\n # Depending on the user, failure of remote commands may be part of normal usage patterns (e.g. if used in\n # a \"wait_until\" loop). So, log it, but don't make it too scary.\n if self.logger is not None:\n self.logger.debug(\"Error running remote command: \" + cmd)\n self.logger.debug(e.output)\n\n if allow_fail:\n return e.returncode\n raise e",
"def test_shell_bad_command():\n out, err = shell_command(\"ls adasdasdas\")\n assert out is None\n assert \"adasdasdas\" in err",
"def ErrCheckBool(result, func, args):\n if not result:\n raise WinError()\n return args"
] | [
"0.7622423",
"0.7359788",
"0.7271346",
"0.72338516",
"0.7085878",
"0.70216614",
"0.70130956",
"0.6983405",
"0.69500047",
"0.6859855",
"0.6827278",
"0.68127596",
"0.6743249",
"0.67426986",
"0.6713755",
"0.67079455",
"0.6696678",
"0.66868126",
"0.6676383",
"0.66693264",
"0.6666139",
"0.6583459",
"0.6580775",
"0.65728116",
"0.65716887",
"0.65700775",
"0.6565501",
"0.656435",
"0.6558497",
"0.6518098"
] | 0.7674272 | 0 |
Upgrade hosts via repository or RPMs | def upgrade(self, servers, clients):
if ".repo" in self.upgrade_repo:
repo_2 = self.upgrade_repo
repo_1 = self.downgrade_repo
self.updowngrade_via_repo(servers, clients, repo_1, repo_2)
else:
all_hosts = servers + clients
self.updowngrade_via_rpms(all_hosts, "upgrade", self.upgrade_repo) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upgrade_and_downgrade(self, fault_on_pool_upgrade=False):\n # (1)Setup\n self.log.info(\"(1)==Setup and show rpm, dmg and daos versions on all hosts.\")\n hosts_client = self.hostlist_clients\n hosts_server = self.hostlist_servers\n all_hosts = include_local_host(hosts_server)\n self.upgrade_repo = self.params.get(\"upgrade_repo\", '/run/interop/*')\n self.downgrade_repo = self.params.get(\"downgrade_repo\", '/run/interop/*')\n num_attributes = self.params.get(\"num_attributes\", '/run/attrtests/*')\n ior_api = self.params.get(\"api\", '/run/ior/*')\n mount_dir = self.params.get(\"mount_dir\", '/run/dfuse/*')\n self.show_daos_version(all_hosts, hosts_client)\n\n # (2)Create pool container and pool attributes\n self.log.info(\"(2)==Create pool attributes.\")\n self.add_pool(connect=False)\n pool_id = self.pool.identifier\n self.add_container(self.pool)\n self.container.open()\n self.daos_cmd = self.get_daos_command()\n pool_attr_dict = self.create_data_set(num_attributes)\n self.pool.pool.set_attr(data=pool_attr_dict)\n self.verify_pool_attrs(pool_attr_dict)\n self.container.close()\n self.pool.disconnect()\n\n # (3)Setup and run IOR\n self.log.info(\"(3)==Setup and run IOR.\")\n result = run_pcmd(hosts_client, \"mkdir -p {}\".format(mount_dir))\n ior_timeout = self.params.get(\"ior_timeout\", '/run/ior/*')\n iorflags_write = self.params.get(\"write_flg\", '/run/ior/iorflags/*')\n iorflags_read = self.params.get(\"read_flg\", '/run/ior/iorflags/*')\n testfile = os.path.join(mount_dir, \"testfile\")\n testfile_sav = os.path.join(mount_dir, \"testfile_sav\")\n testfile_sav2 = os.path.join(mount_dir, \"testfile_sav2\")\n symlink_testfile = os.path.join(mount_dir, \"symlink_testfile\")\n # (3.a)ior dfs\n if ior_api in (\"DFS\", \"POSIX\"):\n self.log.info(\"(3.a)==Run non-HDF5 IOR write and read.\")\n self.ior_cmd.flags.update(iorflags_write)\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=True, create_cont=True, stop_dfuse=False)\n self.ior_cmd.flags.update(iorflags_read)\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n\n # (3.b)ior hdf5\n elif ior_api == \"HDF5\":\n self.log.info(\"(3.b)==Run IOR HDF5 write and read.\")\n hdf5_plugin_path = self.params.get(\"plugin_path\", '/run/hdf5_vol/')\n self.ior_cmd.flags.update(iorflags_write)\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=True, create_cont=True, stop_dfuse=False)\n self.ior_cmd.flags.update(iorflags_read)\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n else:\n self.fail(\"##(3)Unsupported IOR api {}\".format(ior_api))\n\n # (3.c)ior posix test file with symlink\n if ior_api == \"POSIX\":\n self.log.info(\"(3.c)==Symlink mounted testfile.\")\n result = run_pcmd(hosts_client, \"cd {}\".format(mount_dir))\n result = run_pcmd(hosts_client, \"ls -l {}\".format(testfile))\n result = run_pcmd(hosts_client, \"cp {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"cp {0} {1}\".format(testfile, testfile_sav2))\n self.check_result(result)\n result = run_pcmd(\n hosts_client, \"ln -vs {0} {1}\".format(testfile_sav2, symlink_testfile))\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"ls -l {}\".format(symlink_testfile))\n 
self.check_result(result)\n self.container.close()\n self.pool.disconnect()\n result = run_pcmd(hosts_client, \"fusermount3 -u {}\".format(mount_dir))\n self.check_result(result)\n\n # Verify pool attributes before upgrade\n self.log.info(\"(3.2)==verify pool attributes before upgrade.\")\n self.verify_pool_attrs(pool_attr_dict)\n\n # (4)dmg system stop\n self.log.info(\"(4)==Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n\n # (5)Upgrade\n self.log.info(\"(5)==Upgrade RPMs to 2.2.\")\n self.upgrade(hosts_server, hosts_client)\n\n self.log.info(\"==sleeping 30 more seconds\")\n time.sleep(30)\n # (6)Restart servers\n self.log.info(\"(6)==Restart servers.\")\n self.restart_servers()\n\n # (7)Verification after upgrade\n # Restart agent\n self.log.info(\"(7.1)====Restarting rel_2.2 agent after upgrade.\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n self.get_dmg_command().pool_list(verbose=True)\n self.get_dmg_command().pool_query(pool=pool_id)\n self.daos_cmd.pool_query(pool=pool_id)\n\n # Verify pool attributes\n self.log.info(\"(7.2)====Verifying pool attributes after upgrade.\")\n self.verify_pool_attrs(pool_attr_dict)\n self.daos_ver_after_upgraded(hosts_client)\n\n # Verify IOR data and symlink\n self.log.info(\"(7.3)====Verifying container data IOR read.\")\n if ior_api == \"DFS\":\n self.log.info(\"(7.a)==Run IOR DFS read verification.\")\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n elif ior_api == \"HDF5\":\n self.log.info(\"(7.b)==Run IOR HDF5 read verification.\")\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n else:\n self.log.info(\"(7.c)==Run Symlink check after upgraded.\")\n result = run_pcmd(\n hosts_client,\n \"dfuse --mountpoint {0} --pool {1} --container {2}\".format(\n mount_dir, pool_id, self.container))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(symlink_testfile, testfile_sav2))\n self.check_result(result)\n\n # (8)Dmg pool get-prop\n self.log.info(\"(8)==Dmg pool get-prop after RPMs upgraded before Pool upgraded\")\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool_id))\n self.check_result(result)\n\n # (9)Pool property verification after upgraded\n self.log.info(\"(9)==Dmg pool upgrade and get-prop after RPMs upgraded\")\n\n if fault_on_pool_upgrade and self.has_fault_injection(hosts_client):\n self.log.info(\"(9.1a)==Pool upgrade with fault-injection.\")\n self.pool_upgrade_with_fault(hosts_client, pool_id)\n else:\n self.log.info(\"(9.1b)==Pool upgrade.\")\n result = run_pcmd(hosts_client, \"dmg pool upgrade {}\".format(pool_id))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool_id))\n self.check_result(result)\n self.log.info(\"(9.2)==verify pool attributes after pool-upgraded.\")\n self.verify_pool_attrs(pool_attr_dict)\n self.pool.destroy()\n\n # (10)Create new pool\n self.log.info(\"(10)==Create new pool after rpms Upgraded\")\n self.add_pool(connect=False)\n pool2_id = self.pool.identifier\n self.get_dmg_command().pool_list(verbose=True)\n 
self.get_dmg_command().pool_query(pool=pool2_id)\n self.daos_cmd.pool_query(pool=pool2_id)\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool2_id))\n self.check_result(result)\n\n # (11)Downgrade and cleanup\n self.log.info(\"(11)==Downgrade and cleanup.\")\n if ior_api == \"POSIX\":\n result = run_pcmd(hosts_client, \"fusermount3 -u {}\".format(mount_dir))\n self.check_result(result)\n self.container.close()\n self.pool.disconnect()\n self.pool.destroy()\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n self.log.info(\"(11.1)==Downgrade RPMs to 2.0.3.\")\n self.downgrade(hosts_server, hosts_client)\n self.log.info(\"==sleeping 30 more seconds\")\n time.sleep(30)\n\n # (12)Cleanup restart server and agent\n self.log.info(\"(12)==Restart 2.0 servers and agent.\")\n self.restart_servers()\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n if fault_on_pool_upgrade and not self.has_fault_injection(hosts_client):\n self.fail(\"##(12)Upgraded-rpms did not have fault-injection feature.\")\n self.log.info(\"==(12)Test passed\")",
"def package_upgrade():\n\n if (do_action_package_upgrade('nova-common',\n do_openstack_upgrade,\n CONFIGS)):\n # we should restart the container scoped (subordinate) plugins after a\n # managed openstack upgrade see: BUG#1835557\n for rid in relation_ids('neutron-plugin'):\n neutron_plugin_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-ceilometer'):\n nova_ceilometer_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-vgpu'):\n nova_vgpu_joined(rid, remote_restart=True)\n # NOTE(ajkavanagh) - if unit is paused (usually true for managed\n # upgrade) then the config_changed() function is a no-op\n config_changed()",
"def upgrade_server():\n log('Atualizando programas', yellow)\n sudo('apt-get -y upgrade')",
"def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()",
"def task_upgrade(self):\n with settings(user=self.serviceUser):\n self.update()\n run(\"~/virtualenv/bin/trac-admin {}/trac-env upgrade\".format(self.configDir))\n run(\"~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade\".format(self.configDir))\n\n self.task_restart()",
"def updateHosts(request):\n\n updater = HostUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")",
"def upgrade_packages():\n\n require('environment', provided_by=env.environments)\n system.update_apt_sources()\n system.upgrade_apt_packages()",
"def upgrade_kernel_node(*args):\n for host_string in args:\n with settings(host_string=host_string):\n dist, version, extra = get_linux_distro()\n print \"upgrading apparmor before upgrading kernel\"\n if version == '12.04':\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n elif version == '14.04':\n print \"Installing 3.13.0-40 kernel headers\"\n apt_install([\"linux-headers-3.13.0-40\",\n \"linux-headers-3.13.0-40-generic\"])\n print \"Upgrading the kernel to 3.13.0-40\"\n apt_install([\"linux-image-3.13.0-40-generic\",\n \"linux-image-extra-3.13.0-40-generic\"])",
"def do_upgrade(env, ver, cursor):\n cursor.execute('UPDATE system SET name=%s WHERE name=%s',\n (\"agiletools_version\", \"taskboard_schema\"))",
"def _install(self, host):\n pass",
"def downgrade(self, servers, clients):\n if \".repo\" in self.upgrade_repo:\n repo_1 = self.upgrade_repo\n repo_2 = self.downgrade_repo\n self.updowngrade_via_repo(servers, clients, repo_1, repo_2)\n else:\n all_hosts = servers + clients\n self.updowngrade_via_rpms(all_hosts, \"downgrade\", self.downgrade_repo)",
"def upgrade(self):",
"def upgrade(self):",
"def upgrade_kernel_node(*args, **kwargs):\n for host_string in args:\n with settings(host_string=host_string):\n execute('create_install_repo_node', host_string)\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n print \"upgrading apparmor before upgrading kernel\"\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif version == '14.04':\n if 'version' in kwargs:\n kernel_ver = kwargs.get('version')\n else:\n kernel_ver = \"3.13.0-106\"\n print \"Installing \"+kernel_ver+\" kernel headers\"\n apt_install([\"linux-headers-\"+kernel_ver,\n \"linux-headers-\"+kernel_ver+\"-generic\"])\n print \"Upgrading the kernel to \"+kernel_ver\n apt_install([\"linux-image-\"+kernel_ver+\"-generic\",\n \"linux-image-extra-\"+kernel_ver+\"-generic\"])\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux '+kernel_ver+'-generic'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif 'red hat' in dist.lower() and version.startswith('7'):\n print \"Upgrading RHEL kernel to version 3.10.0-327.10.1\"\n pkg_install([\"kernel-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-libs-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-headers-3.10.0-327.10.1.el7.x86_64\"], disablerepo=False)\n default_grub='Red Hat Enterprise Linux Server (3.10.0-327.10.1.el7.x86_64) 7.2 (Maipo)'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif 'centos linux' in dist.lower() and version.startswith('7'):\n print \"Upgrading Centos kernel to version 3.10.0-327.10.1\"\n pkg_install([\"kernel-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-libs-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-headers-3.10.0-327.10.1.el7.x86_64\"], disablerepo=False)\n default_grub='CentOS Linux (3.10.0-327.10.1.el7.x86_64) 7 (Core)'\n execute('set_grub_default_node', host_string, value=default_grub)",
"def upgrade(self,summary_handle,role,rpm_keyword,image_url,dir_installer,exit_flag,mode,summary_var_dict={}):\n if image_url.endswith(\"/\"):\n imageurl_final = image_url\n else:\n imageurl_final = image_url + \"/\"\n\n length = len(imageurl_final.split('/')) -4\n cmd = \"yum clean all\"\n self.sendCmd(cmd,300)\n dir_installer_role = dir_installer + \"/\" + role\n self.changeDirectory(dir_installer_role)\n tmp_var = \"wget%s%s\" %(self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"false\":\n self.download_rpm(summary_handle,length,imageurl_final,role)\n else:\n self.download_rpm(summary_handle,length,imageurl_final,role)\n\n\n num_files = \"ls -lrt *\\.rpm | grep %s-[0-9] | awk \\'{print $NF}\\' | xargs ls -t | tail -n1\" %rpm_keyword\n output = self.sendCmd(num_files).split(\"\\n\")\n for each in output:\n if each.rstrip().endswith(\"rpm\"):\n\n ##### Step added for uninstalling the rpm before installing \n tmpcmd = \"yum -y remove \" + each.rstrip().rstrip(\".rpm\")\n\n\n tmpcmd1 = \"yum -y install \" + each.rstrip()\n tmp_var = \"%s%s%s\" %(tmpcmd1,self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"true\":\n continue\n\n\n output = self.sendCmd(tmpcmd,600)\n output = self.sendCmd(tmpcmd1,600)\n time.sleep(30)\n output1 = self.sendCmd(\"echo $?\").split(\"\\n\")\n output2 = [item.replace(\"\\r\", \"\") for item in output1]\n if \"0\" not in output2 :\n summary_handle.write(\"%s,%s,%s,fail \\n\" %(tmpcmd1,self,role))\n if exit_flag == \"yes\":\n report.fail(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n logger.info(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n summary_handle.write(\"%s,%s,%s,pass \\n\" %(tmpcmd1,self,role))\n logger.info(\"Successful installation of %s on node %s having role %s\" %(each.strip(),self,role))",
"def upgrade_kernel():\n execute(\"upgrade_kernel_node\", env.host_string)",
"def reinstall_host(self, hostid, config, **kwargs):\n pass",
"def upgrade(verbose_level=1, hostnames=[], servicenames=[]):\n # type: (int, List[str], List[str]) -> Job\n check_arg(verbose_level, u._('Verbose level'), int)\n check_arg(hostnames, u._('Host names'), list,\n empty_ok=True, none_ok=True)\n check_arg(servicenames, u._('Service names'), list,\n empty_ok=True, none_ok=True)\n\n check_kolla_args(servicenames=servicenames)\n\n hostnames = safe_decode(hostnames)\n servicenames = safe_decode(servicenames)\n action = KollaAction(verbose_level=verbose_level,\n playbook_name='site.yml')\n ansible_job = action.upgrade(hostnames, servicenames)\n return Job(ansible_job)",
"def upgrade(packages):\n setup_audit_log()\n packages = CFG.package_specs(packages)\n if not packages:\n inform(\"No packages installed, nothing to upgrade\")\n sys.exit(0)\n\n for pspec in packages:\n perform_install(pspec, is_upgrade=True, force=False, quiet=False)",
"def diff_versions_agent_server(self):\n # (1)Setup\n self.log.info(\"==(1)Setup, create pool and container.\")\n hosts_client = self.hostlist_clients\n hosts_server = self.hostlist_servers\n all_hosts = include_local_host(hosts_server | hosts_client)\n self.upgrade_repo = self.params.get(\"upgrade_repo\", '/run/interop/*')\n self.downgrade_repo = self.params.get(\"downgrade_repo\", '/run/interop/*')\n self.add_pool(connect=False)\n pool_id = self.pool.identifier\n self.add_container(self.pool)\n self.container.open()\n cmd = \"dmg system query\"\n positive_test = True\n negative_test = False\n agent_server_ver = \"2.0 agent to 2.0 server\"\n self.verify_daos_libdaos(\"1.1\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (2)dmg system stop\n self.log.info(\"==(2)Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n\n # (3)Upgrade 1 server-host to new\n self.log.info(\"==(3)Upgrade 1 server to 2.2.\")\n server = hosts_server[0:1]\n self.upgrade(server, [])\n self.log.info(\"==(3.1)server %s Upgrade to 2.2 completed.\", server)\n\n # (4)Negative test - dmg pool query on mix-version servers\n self.log.info(\"==(4)Negative test - dmg pool query on mix-version servers.\")\n agent_server_ver = \"2.0 agent, mix-version server-hosts\"\n cmd = \"dmg pool list\"\n exp_err = \"unable to contact the DAOS Management Service\"\n self.verify_daos_libdaos(\n \"4.1\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n\n # (5)Upgrade rest server-hosts to 2.2\n server = hosts_server[1:len(hosts_server)]\n self.log.info(\"==(5) Upgrade rest server %s to 2.2.\", server)\n self.upgrade(server, [])\n self.log.info(\"==(5.1) server %s Upgrade to 2.2 completed.\", server)\n\n # (6)Restart 2.0 agent\n self.log.info(\"==(6)Restart 2.0 agent\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (7)Verify 2.0 agent connect to 2.2 server\n self.log.info(\"==(7)Verify 2.0 agent connect to 2.2 server\")\n agent_server_ver = \"2.0 agent to 2.2 server\"\n cmd = \"daos pool query {0}\".format(pool_id)\n self.verify_daos_libdaos(\"7.1\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"dmg pool query {0}\".format(pool_id)\n exp_err = \"admin:0.0.0 are not compatible\"\n self.verify_daos_libdaos(\n \"7.2\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"7.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create {0} --type POSIX --properties 'rf:2'\".format(pool_id)\n self.verify_daos_libdaos(\"7.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos pool autotest --pool {0}\".format(pool_id)\n self.verify_daos_libdaos(\"7.5\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (8)Upgrade agent to 2.2\n self.log.info(\"==(8)Upgrade agent to 2.2, now 2.2 servers 2.2 agent.\")\n self.upgrade([], hosts_client)\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (9)Pool and containers create on 2.2 agent and server\n self.log.info(\"==(9)Create new pools and containers on 2.2 agent to 2.2 server\")\n agent_server_ver = \"2.2 agent to 2.2 server\"\n cmd = \"dmg pool create --size 5G New_pool1\"\n self.verify_daos_libdaos(\"9.1\", hosts_client, cmd, positive_test, 
agent_server_ver)\n cmd = \"dmg pool list\"\n self.verify_daos_libdaos(\"9.2\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 C21 --type POSIX --properties 'rf:2'\"\n self.verify_daos_libdaos(\"9.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 C22 --type POSIX --properties 'rf:2'\"\n self.verify_daos_libdaos(\"9.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos container list New_pool1\"\n self.verify_daos_libdaos(\"9.5\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"9.6\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos pool autotest --pool New_pool1\"\n self.verify_daos_libdaos(\"9.7\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (10)Downgrade server to 2.0\n self.log.info(\"==(10)Downgrade server to 2.0, now 2.2 agent to 2.0 server.\")\n self.log.info(\"==(10.1)Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n self.log.info(\"==(10.2)Downgrade server to 2.0\")\n self.downgrade(hosts_server, [])\n self.log.info(\"==(10.3)Restart 2.0 agent\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (11)Verify 2.2 agent to 2.0 server\n agent_server_ver = \"2.2 agent to 2.0 server\"\n cmd = \"daos pool query {0}\".format(pool_id)\n self.verify_daos_libdaos(\"11.1\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"dmg pool query {0}\".format(pool_id)\n exp_err = \"does not match\"\n self.verify_daos_libdaos(\n \"11.2\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"11.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create {0} 'C_oldP' --type POSIX --properties 'rf:2'\".format(\n pool_id)\n self.verify_daos_libdaos(\"11.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 'C_newP' --type POSIX --properties 'rf:2'\"\n exp_err = \"DER_NO_SERVICE(-2039)\"\n self.verify_daos_libdaos(\n \"11.5\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n exp_err = \"common ERR\"\n cmd = \"daos pool autotest --pool {0}\".format(pool_id)\n self.verify_daos_libdaos(\n \"11.6\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n\n # (12)Downgrade agent to 2.0\n self.log.info(\"==(12)Agent %s Downgrade started.\", hosts_client)\n self.downgrade([], hosts_client)\n self.log.info(\"==Test passed\")",
"def pool_upgrade_with_fault(self, hosts, pool_id):\n # Verify pool status before upgrade\n expected_status = \"not started\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Enable fault-injection\n self.enable_disable_fault_injection(hosts, enable=True)\n\n # Pool upgrade\n result = run_pcmd(hosts, \"dmg pool upgrade {}\".format(pool_id))\n self.check_result(result)\n # Verify pool status during upgrade\n expected_status = \"in progress\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n # Verify pool status during upgrade\n expected_status = \"failed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Disable fault-injection\n self.enable_disable_fault_injection(hosts, enable=False)\n # Verify pool upgrade resume after removal of fault-injection\n expected_status = \"completed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)",
"def deploy():\n update_treesheets()\n restart_treesheets()",
"def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0",
"def deploy_nixos(hosts: List[DeployHost]) -> None:\n g = DeployGroup(hosts)\n\n def deploy(h: DeployHost) -> None:\n target = f\"{h.user or 'root'}@{h.host}\"\n flake_path = h.meta.get(\"flake_path\", \"/etc/nixos\")\n h.run_local(\n f\"rsync {' --exclude '.join([''] + RSYNC_EXCLUDES)} -vaF --delete -e ssh . {target}:{flake_path}\"\n )\n\n flake_attr = h.meta.get(\"flake_attr\", \"\")\n if flake_attr:\n flake_attr = \"#\" + flake_attr\n target_host = h.meta.get(\"target_host\")\n if target_host:\n target_user = h.meta.get(\"target_user\")\n if target_user:\n target_host = f\"{target_user}@{target_host}\"\n extra_args = h.meta.get(\"extra_args\", [])\n cmd = (\n [\"nixos-rebuild\", \"switch\"]\n + extra_args\n + [\n \"--fast\",\n \"--option\",\n \"keep-going\",\n \"true\",\n \"--option\",\n \"accept-flake-config\",\n \"true\",\n \"--build-host\",\n \"\",\n \"--flake\",\n f\"{flake_path}{flake_attr}\",\n ]\n )\n if target_host:\n cmd.extend([\"--target-host\", target_host])\n ret = h.run(cmd, check=False)\n # re-retry switch if the first time fails\n if ret.returncode != 0:\n ret = h.run(cmd)\n\n g.run_function(deploy)",
"def deploy(c, _hosts=\"\"):\n eve = DeployHost(\"eve.i\", user=\"root\")\n if _hosts != \"\":\n hosts = get_hosts(_hosts)\n else:\n hosts = [\n eve,\n DeployHost(\n \"localhost\",\n user=\"joerg\",\n meta=dict(\n extra_args=[\"--use-remote-sudo\"],\n flake_path=\"/home/joerg/.homesick/repos/dotfiles\",\n ),\n forward_agent=True,\n ),\n DeployHost(\n \"eve.i\",\n user=\"root\",\n forward_agent=True,\n command_prefix=\"eva.r\",\n meta=dict(target_host=\"eva.i\", flake_attr=\"eva\"),\n ),\n DeployHost(\n \"eve.i\",\n user=\"root\",\n forward_agent=True,\n command_prefix=\"blob64.r\",\n meta=dict(target_host=\"blob64.r\", flake_attr=\"blob64\"),\n ),\n ]\n deploy_nixos(hosts)\n eve.run(\"systemctl restart buildbot-master\")",
"def deploy():",
"def deploy():\n\n require('environment', provided_by=env.environments)\n update_source()\n update_requirements()\n mgmt('syncdb', '--migrate')\n restart_supervisor()",
"def deploy(upgrade=False):\n print(\"Deploying project on {} !\".format(env.stage))\n execute('system.setup')\n execute('git.checkout')\n execute('virtualenv.setup')\n execute('django.setup')\n execute('cron.setup')\n execute('uwsgi.setup')\n execute('supervisor.setup')\n execute('nginx.setup')",
"def deploy(env='development', update_settings='n', upgrade_apps='n'):\n update_site(env, update_settings, upgrade_apps)\n restart_site(env)",
"def update():\n\n # update plone\n with cd(env.directory):\n sudo('git pull', user=env.deploy_user)\n\n with cd(env.directory):\n stop()\n sudo('git checkout {}'.format(env.branch), user=env.deploy_user)\n\n # bootstrap\n\n if env.latest:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/plone/buildout.coredev/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n sudo('rm -rf ./src-mrd', user=env.deploy_user)\n else:\n sudo('./bin/pip install --no-cache-dir -r requirements.txt', user=env.deploy_user) # noqa: E501\n\n sudo('rm -rf ./var/blobstorage ./var/filestorage .installed.cfg ', user=env.deploy_user) # noqa: E501\n\n # buildout\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start zope\n start()\n sudo(\"sleep 10\")\n\n # create plonesite with addons (uses different ports for py2 and py3)\n if env.latest:\n if env.python3:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone-latest-py3.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n else:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone-latest-py2.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n else:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n\n # load page to warmup\n sudo('/usr/bin/wget -S -qO- {domain} > /dev/null'.format(domain=env.domain), user=env.deploy_user) # noqa: E501"
] | [
"0.65535074",
"0.6516614",
"0.6511681",
"0.64082664",
"0.6343064",
"0.6275903",
"0.625844",
"0.6231782",
"0.6076686",
"0.6070641",
"0.6041249",
"0.6015797",
"0.6015797",
"0.6012341",
"0.59814286",
"0.5954745",
"0.5914903",
"0.59111565",
"0.58789015",
"0.5869267",
"0.5842611",
"0.579653",
"0.5793881",
"0.57889915",
"0.57876885",
"0.57772505",
"0.5762005",
"0.5750749",
"0.57423407",
"0.57340217"
] | 0.7090284 | 0 |
Downgrade hosts via repository or RPMs | def downgrade(self, servers, clients):
if ".repo" in self.upgrade_repo:
repo_1 = self.upgrade_repo
repo_2 = self.downgrade_repo
self.updowngrade_via_repo(servers, clients, repo_1, repo_2)
else:
all_hosts = servers + clients
self.updowngrade_via_rpms(all_hosts, "downgrade", self.downgrade_repo) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upgrade(self, servers, clients):\n if \".repo\" in self.upgrade_repo:\n repo_2 = self.upgrade_repo\n repo_1 = self.downgrade_repo\n self.updowngrade_via_repo(servers, clients, repo_1, repo_2)\n else:\n all_hosts = servers + clients\n self.updowngrade_via_rpms(all_hosts, \"upgrade\", self.upgrade_repo)",
"def upgrade_and_downgrade(self, fault_on_pool_upgrade=False):\n # (1)Setup\n self.log.info(\"(1)==Setup and show rpm, dmg and daos versions on all hosts.\")\n hosts_client = self.hostlist_clients\n hosts_server = self.hostlist_servers\n all_hosts = include_local_host(hosts_server)\n self.upgrade_repo = self.params.get(\"upgrade_repo\", '/run/interop/*')\n self.downgrade_repo = self.params.get(\"downgrade_repo\", '/run/interop/*')\n num_attributes = self.params.get(\"num_attributes\", '/run/attrtests/*')\n ior_api = self.params.get(\"api\", '/run/ior/*')\n mount_dir = self.params.get(\"mount_dir\", '/run/dfuse/*')\n self.show_daos_version(all_hosts, hosts_client)\n\n # (2)Create pool container and pool attributes\n self.log.info(\"(2)==Create pool attributes.\")\n self.add_pool(connect=False)\n pool_id = self.pool.identifier\n self.add_container(self.pool)\n self.container.open()\n self.daos_cmd = self.get_daos_command()\n pool_attr_dict = self.create_data_set(num_attributes)\n self.pool.pool.set_attr(data=pool_attr_dict)\n self.verify_pool_attrs(pool_attr_dict)\n self.container.close()\n self.pool.disconnect()\n\n # (3)Setup and run IOR\n self.log.info(\"(3)==Setup and run IOR.\")\n result = run_pcmd(hosts_client, \"mkdir -p {}\".format(mount_dir))\n ior_timeout = self.params.get(\"ior_timeout\", '/run/ior/*')\n iorflags_write = self.params.get(\"write_flg\", '/run/ior/iorflags/*')\n iorflags_read = self.params.get(\"read_flg\", '/run/ior/iorflags/*')\n testfile = os.path.join(mount_dir, \"testfile\")\n testfile_sav = os.path.join(mount_dir, \"testfile_sav\")\n testfile_sav2 = os.path.join(mount_dir, \"testfile_sav2\")\n symlink_testfile = os.path.join(mount_dir, \"symlink_testfile\")\n # (3.a)ior dfs\n if ior_api in (\"DFS\", \"POSIX\"):\n self.log.info(\"(3.a)==Run non-HDF5 IOR write and read.\")\n self.ior_cmd.flags.update(iorflags_write)\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=True, create_cont=True, stop_dfuse=False)\n self.ior_cmd.flags.update(iorflags_read)\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n\n # (3.b)ior hdf5\n elif ior_api == \"HDF5\":\n self.log.info(\"(3.b)==Run IOR HDF5 write and read.\")\n hdf5_plugin_path = self.params.get(\"plugin_path\", '/run/hdf5_vol/')\n self.ior_cmd.flags.update(iorflags_write)\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=True, create_cont=True, stop_dfuse=False)\n self.ior_cmd.flags.update(iorflags_read)\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n else:\n self.fail(\"##(3)Unsupported IOR api {}\".format(ior_api))\n\n # (3.c)ior posix test file with symlink\n if ior_api == \"POSIX\":\n self.log.info(\"(3.c)==Symlink mounted testfile.\")\n result = run_pcmd(hosts_client, \"cd {}\".format(mount_dir))\n result = run_pcmd(hosts_client, \"ls -l {}\".format(testfile))\n result = run_pcmd(hosts_client, \"cp {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"cp {0} {1}\".format(testfile, testfile_sav2))\n self.check_result(result)\n result = run_pcmd(\n hosts_client, \"ln -vs {0} {1}\".format(testfile_sav2, symlink_testfile))\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"ls -l {}\".format(symlink_testfile))\n 
self.check_result(result)\n self.container.close()\n self.pool.disconnect()\n result = run_pcmd(hosts_client, \"fusermount3 -u {}\".format(mount_dir))\n self.check_result(result)\n\n # Verify pool attributes before upgrade\n self.log.info(\"(3.2)==verify pool attributes before upgrade.\")\n self.verify_pool_attrs(pool_attr_dict)\n\n # (4)dmg system stop\n self.log.info(\"(4)==Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n\n # (5)Upgrade\n self.log.info(\"(5)==Upgrade RPMs to 2.2.\")\n self.upgrade(hosts_server, hosts_client)\n\n self.log.info(\"==sleeping 30 more seconds\")\n time.sleep(30)\n # (6)Restart servers\n self.log.info(\"(6)==Restart servers.\")\n self.restart_servers()\n\n # (7)Verification after upgrade\n # Restart agent\n self.log.info(\"(7.1)====Restarting rel_2.2 agent after upgrade.\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n self.get_dmg_command().pool_list(verbose=True)\n self.get_dmg_command().pool_query(pool=pool_id)\n self.daos_cmd.pool_query(pool=pool_id)\n\n # Verify pool attributes\n self.log.info(\"(7.2)====Verifying pool attributes after upgrade.\")\n self.verify_pool_attrs(pool_attr_dict)\n self.daos_ver_after_upgraded(hosts_client)\n\n # Verify IOR data and symlink\n self.log.info(\"(7.3)====Verifying container data IOR read.\")\n if ior_api == \"DFS\":\n self.log.info(\"(7.a)==Run IOR DFS read verification.\")\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n elif ior_api == \"HDF5\":\n self.log.info(\"(7.b)==Run IOR HDF5 read verification.\")\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n else:\n self.log.info(\"(7.c)==Run Symlink check after upgraded.\")\n result = run_pcmd(\n hosts_client,\n \"dfuse --mountpoint {0} --pool {1} --container {2}\".format(\n mount_dir, pool_id, self.container))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(symlink_testfile, testfile_sav2))\n self.check_result(result)\n\n # (8)Dmg pool get-prop\n self.log.info(\"(8)==Dmg pool get-prop after RPMs upgraded before Pool upgraded\")\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool_id))\n self.check_result(result)\n\n # (9)Pool property verification after upgraded\n self.log.info(\"(9)==Dmg pool upgrade and get-prop after RPMs upgraded\")\n\n if fault_on_pool_upgrade and self.has_fault_injection(hosts_client):\n self.log.info(\"(9.1a)==Pool upgrade with fault-injection.\")\n self.pool_upgrade_with_fault(hosts_client, pool_id)\n else:\n self.log.info(\"(9.1b)==Pool upgrade.\")\n result = run_pcmd(hosts_client, \"dmg pool upgrade {}\".format(pool_id))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool_id))\n self.check_result(result)\n self.log.info(\"(9.2)==verify pool attributes after pool-upgraded.\")\n self.verify_pool_attrs(pool_attr_dict)\n self.pool.destroy()\n\n # (10)Create new pool\n self.log.info(\"(10)==Create new pool after rpms Upgraded\")\n self.add_pool(connect=False)\n pool2_id = self.pool.identifier\n self.get_dmg_command().pool_list(verbose=True)\n 
self.get_dmg_command().pool_query(pool=pool2_id)\n self.daos_cmd.pool_query(pool=pool2_id)\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool2_id))\n self.check_result(result)\n\n # (11)Downgrade and cleanup\n self.log.info(\"(11)==Downgrade and cleanup.\")\n if ior_api == \"POSIX\":\n result = run_pcmd(hosts_client, \"fusermount3 -u {}\".format(mount_dir))\n self.check_result(result)\n self.container.close()\n self.pool.disconnect()\n self.pool.destroy()\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n self.log.info(\"(11.1)==Downgrade RPMs to 2.0.3.\")\n self.downgrade(hosts_server, hosts_client)\n self.log.info(\"==sleeping 30 more seconds\")\n time.sleep(30)\n\n # (12)Cleanup restart server and agent\n self.log.info(\"(12)==Restart 2.0 servers and agent.\")\n self.restart_servers()\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n if fault_on_pool_upgrade and not self.has_fault_injection(hosts_client):\n self.fail(\"##(12)Upgraded-rpms did not have fault-injection feature.\")\n self.log.info(\"==(12)Test passed\")",
"def downgrade():\n pass",
"def downgrade():\n pass",
"def downgrade():\n raise NotImplementedError(\"Downgrade is not supported\")",
"def downgrade():\n raise NotImplementedError(\"Downgrade is not supported\")",
"def downgrade():\n raise NotImplementedError(\"Downgrade is not supported\")",
"def upgrade(self,summary_handle,role,rpm_keyword,image_url,dir_installer,exit_flag,mode,summary_var_dict={}):\n if image_url.endswith(\"/\"):\n imageurl_final = image_url\n else:\n imageurl_final = image_url + \"/\"\n\n length = len(imageurl_final.split('/')) -4\n cmd = \"yum clean all\"\n self.sendCmd(cmd,300)\n dir_installer_role = dir_installer + \"/\" + role\n self.changeDirectory(dir_installer_role)\n tmp_var = \"wget%s%s\" %(self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"false\":\n self.download_rpm(summary_handle,length,imageurl_final,role)\n else:\n self.download_rpm(summary_handle,length,imageurl_final,role)\n\n\n num_files = \"ls -lrt *\\.rpm | grep %s-[0-9] | awk \\'{print $NF}\\' | xargs ls -t | tail -n1\" %rpm_keyword\n output = self.sendCmd(num_files).split(\"\\n\")\n for each in output:\n if each.rstrip().endswith(\"rpm\"):\n\n ##### Step added for uninstalling the rpm before installing \n tmpcmd = \"yum -y remove \" + each.rstrip().rstrip(\".rpm\")\n\n\n tmpcmd1 = \"yum -y install \" + each.rstrip()\n tmp_var = \"%s%s%s\" %(tmpcmd1,self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"true\":\n continue\n\n\n output = self.sendCmd(tmpcmd,600)\n output = self.sendCmd(tmpcmd1,600)\n time.sleep(30)\n output1 = self.sendCmd(\"echo $?\").split(\"\\n\")\n output2 = [item.replace(\"\\r\", \"\") for item in output1]\n if \"0\" not in output2 :\n summary_handle.write(\"%s,%s,%s,fail \\n\" %(tmpcmd1,self,role))\n if exit_flag == \"yes\":\n report.fail(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n logger.info(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n summary_handle.write(\"%s,%s,%s,pass \\n\" %(tmpcmd1,self,role))\n logger.info(\"Successful installation of %s on node %s having role %s\" %(each.strip(),self,role))",
"def maintenance_down():\n install_apache_conf()\n reboot()",
"def pool_upgrade_with_fault(self, hosts, pool_id):\n # Verify pool status before upgrade\n expected_status = \"not started\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Enable fault-injection\n self.enable_disable_fault_injection(hosts, enable=True)\n\n # Pool upgrade\n result = run_pcmd(hosts, \"dmg pool upgrade {}\".format(pool_id))\n self.check_result(result)\n # Verify pool status during upgrade\n expected_status = \"in progress\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n # Verify pool status during upgrade\n expected_status = \"failed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Disable fault-injection\n self.enable_disable_fault_injection(hosts, enable=False)\n # Verify pool upgrade resume after removal of fault-injection\n expected_status = \"completed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)",
"def upgrade_server():\n log('Atualizando programas', yellow)\n sudo('apt-get -y upgrade')",
"def package_upgrade():\n\n if (do_action_package_upgrade('nova-common',\n do_openstack_upgrade,\n CONFIGS)):\n # we should restart the container scoped (subordinate) plugins after a\n # managed openstack upgrade see: BUG#1835557\n for rid in relation_ids('neutron-plugin'):\n neutron_plugin_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-ceilometer'):\n nova_ceilometer_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-vgpu'):\n nova_vgpu_joined(rid, remote_restart=True)\n # NOTE(ajkavanagh) - if unit is paused (usually true for managed\n # upgrade) then the config_changed() function is a no-op\n config_changed()",
"def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()",
"def upgrade_kernel():\n execute(\"upgrade_kernel_node\", env.host_string)",
"def test_can_downgrade(self):\n self.change_status(self.version_1_2_0, amo.STATUS_PENDING)\n for v in Version.objects.filter(pk__gte=self.version_1_2_1):\n v.delete()\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n\n assert version == self.version_1_1_3",
"def upgrade_kernel_node(*args):\n for host_string in args:\n with settings(host_string=host_string):\n dist, version, extra = get_linux_distro()\n print \"upgrading apparmor before upgrading kernel\"\n if version == '12.04':\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n elif version == '14.04':\n print \"Installing 3.13.0-40 kernel headers\"\n apt_install([\"linux-headers-3.13.0-40\",\n \"linux-headers-3.13.0-40-generic\"])\n print \"Upgrading the kernel to 3.13.0-40\"\n apt_install([\"linux-image-3.13.0-40-generic\",\n \"linux-image-extra-3.13.0-40-generic\"])",
"def task_upgrade(self):\n with settings(user=self.serviceUser):\n self.update()\n run(\"~/virtualenv/bin/trac-admin {}/trac-env upgrade\".format(self.configDir))\n run(\"~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade\".format(self.configDir))\n\n self.task_restart()",
"def down(config):\n pass",
"def test_redeploy_edges(self):\n pass",
"def diff_versions_agent_server(self):\n # (1)Setup\n self.log.info(\"==(1)Setup, create pool and container.\")\n hosts_client = self.hostlist_clients\n hosts_server = self.hostlist_servers\n all_hosts = include_local_host(hosts_server | hosts_client)\n self.upgrade_repo = self.params.get(\"upgrade_repo\", '/run/interop/*')\n self.downgrade_repo = self.params.get(\"downgrade_repo\", '/run/interop/*')\n self.add_pool(connect=False)\n pool_id = self.pool.identifier\n self.add_container(self.pool)\n self.container.open()\n cmd = \"dmg system query\"\n positive_test = True\n negative_test = False\n agent_server_ver = \"2.0 agent to 2.0 server\"\n self.verify_daos_libdaos(\"1.1\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (2)dmg system stop\n self.log.info(\"==(2)Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n\n # (3)Upgrade 1 server-host to new\n self.log.info(\"==(3)Upgrade 1 server to 2.2.\")\n server = hosts_server[0:1]\n self.upgrade(server, [])\n self.log.info(\"==(3.1)server %s Upgrade to 2.2 completed.\", server)\n\n # (4)Negative test - dmg pool query on mix-version servers\n self.log.info(\"==(4)Negative test - dmg pool query on mix-version servers.\")\n agent_server_ver = \"2.0 agent, mix-version server-hosts\"\n cmd = \"dmg pool list\"\n exp_err = \"unable to contact the DAOS Management Service\"\n self.verify_daos_libdaos(\n \"4.1\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n\n # (5)Upgrade rest server-hosts to 2.2\n server = hosts_server[1:len(hosts_server)]\n self.log.info(\"==(5) Upgrade rest server %s to 2.2.\", server)\n self.upgrade(server, [])\n self.log.info(\"==(5.1) server %s Upgrade to 2.2 completed.\", server)\n\n # (6)Restart 2.0 agent\n self.log.info(\"==(6)Restart 2.0 agent\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (7)Verify 2.0 agent connect to 2.2 server\n self.log.info(\"==(7)Verify 2.0 agent connect to 2.2 server\")\n agent_server_ver = \"2.0 agent to 2.2 server\"\n cmd = \"daos pool query {0}\".format(pool_id)\n self.verify_daos_libdaos(\"7.1\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"dmg pool query {0}\".format(pool_id)\n exp_err = \"admin:0.0.0 are not compatible\"\n self.verify_daos_libdaos(\n \"7.2\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"7.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create {0} --type POSIX --properties 'rf:2'\".format(pool_id)\n self.verify_daos_libdaos(\"7.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos pool autotest --pool {0}\".format(pool_id)\n self.verify_daos_libdaos(\"7.5\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (8)Upgrade agent to 2.2\n self.log.info(\"==(8)Upgrade agent to 2.2, now 2.2 servers 2.2 agent.\")\n self.upgrade([], hosts_client)\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (9)Pool and containers create on 2.2 agent and server\n self.log.info(\"==(9)Create new pools and containers on 2.2 agent to 2.2 server\")\n agent_server_ver = \"2.2 agent to 2.2 server\"\n cmd = \"dmg pool create --size 5G New_pool1\"\n self.verify_daos_libdaos(\"9.1\", hosts_client, cmd, positive_test, 
agent_server_ver)\n cmd = \"dmg pool list\"\n self.verify_daos_libdaos(\"9.2\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 C21 --type POSIX --properties 'rf:2'\"\n self.verify_daos_libdaos(\"9.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 C22 --type POSIX --properties 'rf:2'\"\n self.verify_daos_libdaos(\"9.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos container list New_pool1\"\n self.verify_daos_libdaos(\"9.5\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"9.6\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos pool autotest --pool New_pool1\"\n self.verify_daos_libdaos(\"9.7\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (10)Downgrade server to 2.0\n self.log.info(\"==(10)Downgrade server to 2.0, now 2.2 agent to 2.0 server.\")\n self.log.info(\"==(10.1)Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n self.log.info(\"==(10.2)Downgrade server to 2.0\")\n self.downgrade(hosts_server, [])\n self.log.info(\"==(10.3)Restart 2.0 agent\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (11)Verify 2.2 agent to 2.0 server\n agent_server_ver = \"2.2 agent to 2.0 server\"\n cmd = \"daos pool query {0}\".format(pool_id)\n self.verify_daos_libdaos(\"11.1\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"dmg pool query {0}\".format(pool_id)\n exp_err = \"does not match\"\n self.verify_daos_libdaos(\n \"11.2\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"11.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create {0} 'C_oldP' --type POSIX --properties 'rf:2'\".format(\n pool_id)\n self.verify_daos_libdaos(\"11.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 'C_newP' --type POSIX --properties 'rf:2'\"\n exp_err = \"DER_NO_SERVICE(-2039)\"\n self.verify_daos_libdaos(\n \"11.5\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n exp_err = \"common ERR\"\n cmd = \"daos pool autotest --pool {0}\".format(pool_id)\n self.verify_daos_libdaos(\n \"11.6\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n\n # (12)Downgrade agent to 2.0\n self.log.info(\"==(12)Agent %s Downgrade started.\", hosts_client)\n self.downgrade([], hosts_client)\n self.log.info(\"==Test passed\")",
"def test_redeploy(self):\n pass",
"def m_DownPkgAndTar(self,pkgURL,machineIps,port,username,password):\n packageName = pkgURL.split(\"/\")[-1]\n execmd = \"cd /root\\nwget \" + pkgURL + \"\\ntar -xzvf \" + packageName\n for k, v in machineIps.items():\n b.sshclient_execmd(k, port,username,password,execmd)",
"def downgrade(revision, sql):\n alembic_command.downgrade(alembic_config, revision, sql=sql)",
"def upgrade_packages():\n\n require('environment', provided_by=env.environments)\n system.update_apt_sources()\n system.upgrade_apt_packages()",
"def daos_ver_after_upgraded(self, host):\n cmds = [\n \"daos version\",\n \"dmg version\",\n \"daos pool query {}\".format(self.pool.identifier)]\n for cmd in cmds:\n self.log.info(\"==cmd= %s\", cmd)\n result = pcmd(host, cmd, False)\n if 0 not in result or len(result) > 1:\n failed = []\n for item, value in list(result.items()):\n if item != 0:\n failed.extend(value)\n raise CommandFailure(\"##Error occurred running '{}' on {}\".format(\n cmd, host))\n self.log.info(\"==>%s result= %s\", cmd, result)",
"def test_downgrade_control(self, ping_fixture_all_errs_disconnect):\n\n engine = ping_fixture_all_errs_disconnect\n\n conn = engine.connect()\n conn.close()",
"def downgrade(self, revision):\n alembic.command.downgrade(self.alembic_config(), revision)",
"def ping_many_updown(self, hosts):\n raise NotImplementedError()",
"def down_nodes(nodelist, reason):\n if isinstance(nodelist, list):\n nodelist = util.to_hostlist(nodelist)\n run(f\"{lkp.scontrol} update nodename={nodelist} state=down reason='{reason}'\")",
"def reinstall_host(self, hostid, config, **kwargs):\n pass"
] | [
"0.6603967",
"0.6249081",
"0.6235765",
"0.6235765",
"0.62223345",
"0.62223345",
"0.62223345",
"0.61755437",
"0.6065216",
"0.6048611",
"0.6013978",
"0.5956582",
"0.5954306",
"0.5892654",
"0.58716166",
"0.5861164",
"0.5860646",
"0.58190066",
"0.57984877",
"0.57966673",
"0.57800317",
"0.57785654",
"0.57540977",
"0.5752914",
"0.56928384",
"0.5618237",
"0.5608636",
"0.5605862",
"0.56052285",
"0.5573081"
] | 0.6965447 | 0 |
To display daos and dmg version, and check for error. | def daos_ver_after_upgraded(self, host):
    cmds = [
        "daos version",
        "dmg version",
        "daos pool query {}".format(self.pool.identifier)]
    for cmd in cmds:
        self.log.info("==cmd= %s", cmd)
        result = pcmd(host, cmd, False)
        if 0 not in result or len(result) > 1:
            failed = []
            for item, value in list(result.items()):
                if item != 0:
                    failed.extend(value)
            raise CommandFailure("##Error occurred running '{}' on {}".format(
                cmd, host))
        self.log.info("==>%s result= %s", cmd, result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_version():\n print(\"===============================================================\")\n print(f\"Productivity App v{__VERSION__}\")\n print(f\"Made by {__AUTHOR__} (with the advices of {__ADVISOR__})\")\n print(\"Source : https://github.com/Ilade-s/productivite-app-TkVer\")\n print(\"Server (optionnal) : https://github.com/Tifiloow/productivite-app\")\n print(\"Assets : https://feathericons.com/\")\n print(\"===============================================================\")",
"def check_installation(cmdl_args):\n if not which('Dimemas'):\n print('==WARNING== Could not find Dimemas. Please make sure Dimemas is correctly installed and in the path.')\n\n if cmdl_args.debug:\n print(f'==DEBUG== Using {__file__} {__version__}')\n print(f'==DEBUG== Using {sys.executable}.{map(str, sys.version_info[:3])}')\n\n try:\n print(f'==DEBUG== Using Pandas {pd.__version__}')\n except NameError:\n print(f'==ERROR== Pandas not installed.')\n sys.exit(1)\n try:\n print(f'==DEBUG== Using Dask {dask.__version__}')\n except NameError:\n print('==ERROR== Dask not installed')\n try:\n print(f'==DEBUG== Using h5py {h5py.__version__}')\n except NameError:\n print('==ERROR== h5py not installed')\n print(f\"==DEBUG== Using {which('Dimemas')}\")\n print('')\n\n return",
"def show_version():\n terminal.echo(f\"{package_metadata['name']} {package_metadata['version']}\")",
"def show_version():\n from gpgcloud import __version__\n print os.path.basename(sys.argv[0]), __version__\n sys.exit(0)",
"def display_version_and_exit():\n sys.stdout.write(\"%s\\n\" % __version__)\n sys.exit(0)",
"def version():\n\tclick.clear()\n\trich.print(\"[bold magenta]Image Dataset Tool (IDT)[/bold magenta] version 0.0.6 beta\")",
"def test_denoiser_supported_version(self):\r\n\r\n pass_test = True\r\n try:\r\n check_flowgram_ali_exe()\r\n except (ApplicationNotFoundError, ApplicationError):\r\n pass_test = False\r\n\r\n self.assertTrue(pass_test,\r\n \"Denoiser flowgram aligner not found or not \"\r\n \"executable. This may or may not be a problem \"\r\n \"depending on which components of QIIME you plan to \"\r\n \"use.\")",
"def show_versions():\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n\n print(\"\\nSystem:\")\n for k, stat in sys_info.items():\n print(\"{k:>10}: {stat}\".format(k=k, stat=stat))\n\n print(\"\\nPython dependencies:\")\n for k, stat in deps_info.items():\n print(\"{k:>13}: {stat}\".format(k=k, stat=stat))",
"def version_option():\n print \"%s %s <%s>\" % (PROGRAM_NAME,PROGRAM_VERSION,PROGRAM_SOURCE)",
"def printVersion(self):\n print system.about_string()\n sys.exit(0)",
"def show_versions():\n sys_info = _get_sys_info()\n versions = _get_autogluon_versions()\n sorted_keys = sorted(versions.keys(), key=lambda x: x.lower())\n\n maxlen = 0 if len(versions) == 0 else max(len(x) for x in versions)\n print(\"\\nINSTALLED VERSIONS\")\n print(\"------------------\")\n for k, v in sys_info.items():\n print(f\"{k:<{maxlen}}: {v}\")\n print(\"\")\n for k in sorted_keys:\n print(f\"{k:<{maxlen}}: {versions[k]}\")",
"def check(force, verbose, packages):\n code = 0\n packages = CFG.package_specs(packages)\n if not packages:\n print(\"No packages installed\")\n sys.exit(0)\n\n for pspec in packages:\n desired = pspec.get_desired_version_info(force=force)\n dv = runez.bold(desired.version)\n manifest = pspec.get_manifest()\n if desired.problem:\n msg = desired.problem\n code = 1\n\n elif not manifest or not manifest.version:\n msg = \"v%s is not installed\" % dv\n code = 1\n\n elif manifest.version == desired.version:\n msg = \"v%s is installed\" % dv\n\n else:\n action = \"upgraded to\" if desired.source == \"latest\" else \"caught up to %s\" % desired.source\n msg = \"v%s installed, can be %s v%s\" % (runez.dim(manifest.version), action, dv)\n\n print(\"%s: %s\" % (pspec.dashed, msg))\n\n sys.exit(code)",
"def troubleshoot():\n libraries = (sys, pd, openpyxl, matplotlib, pip)\n for i in libraries:\n try:\n print(str(i), 'version:', i.__version__)\n except AttributeError:\n pass\n except ModuleNotFoundError:\n print('You do not have', str(i), 'installed.')\n print('You can do so via your interpreter or:')\n print('py -m pip install', '-' + str(i))\n print('in command prompt')",
"def logo():\n print (\"\"\"\\\n _ _\n| |_ ___ ___ ___ _ _ _| |\n| | . | | -_| | | . |\n|_|_|___|_|_|___|_ |___|\n |___|\n \"\"\")\n print ('Author: Peter Sooky <[email protected]>')\n print ('Honeyd-python {0}'.format(honeyd.__version__))",
"def test_cdhit_supported_version(self):\r\n self.assertTrue(which('cd-hit'),\r\n \"cd-hit not found. This may or may not be a problem depending on \" +\r\n \"which components of QIIME you plan to use.\")\r\n # cd-hit does not have a version print in their program\r",
"def report(*packages):\n accepted_commands = ['python','conda']\n for package in packages:\n loc = \"not installed in this environment\"\n ver = \"unknown\"\n\n try:\n module = importlib.import_module(package)\n loc = os.path.dirname(module.__file__)\n\n try:\n ver = str(module.__version__)\n except Exception:\n pass\n \n except (ImportError, ModuleNotFoundError):\n if package in accepted_commands:\n try:\n # See if there is a command by that name and check its --version if so\n try:\n loc = subprocess.check_output(['command','-v', package]).decode().splitlines()[0].strip()\n except:\n # .exe in case powershell (otherwise wouldn't need it)\n loc = subprocess.check_output(['where.exe', package]).decode().splitlines()[0].strip() \n out = \"\"\n try:\n out = subprocess.check_output([package, '--version'], stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n out = e.output\n\n # Assume first word in output with a period and digits is the version\n for s in out.decode().split():\n if '.' in s and str.isdigit(s[0]) and sum(str.isdigit(c) for c in s)>=2:\n ver=s.strip()\n break\n except:\n pass\n elif package == 'system':\n try:\n ver = platform.platform(terse=True)\n loc = \"OS: \" + platform.platform()\n except Exception:\n pass\n else:\n pass\n \n print(\"{0:30} # {1}\".format(package + \"=\" + ver,loc))",
"def show_version(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n click.echo('Zap AppImage utility')\n click.echo('version: {}'.format(__version__))\n ctx.exit()",
"def _check_min_required(self):\n self._adb_available = False\n try:\n adb_version = utils.do_shell_command('adb --version')\n if adb_version:\n if adb_version and 'Android Debug Bridge' in adb_version:\n self._adb_available = True\n else:\n self._adb_available = False\n\n if self._adb_available:\n self._adb_available = False\n adb_devices = utils.do_shell_command('adb devices')\n\n try:\n if adb_devices:\n adb_devices = adb_devices.split(os.linesep)\n\n for i, adb_device in enumerate(adb_devices):\n if not adb_device: # skip empty lines at bottom\n continue\n if i == 0: # skip first line 'List of devices attached'\n continue\n if adb_device.startswith('*'): # skip these lines '* daemon started successfully *'\n continue\n\n self._adb_available = True\n\n if not self._adb_available:\n print('No Devices! Make sure \\'Usb-Debugging\\' is enabled in DeveloperSettings')\n\n except Exception as e:\n print(e)\n\n # io error is handled here not in do_shell_command\n # if adb isnt there it gives file not found\n except IOError as io_error:\n # file not found\n if io_error.errno == 2:\n self._adb_available = False",
"def version():\n click.echo(u'shellfoundry version ' + pkg_resources.get_distribution(u'shellfoundry').version)",
"def show_versions():\n\n print(\n f\"Version info: \"\n f\"autodoc_pydantic: {get_version('sphinxcontrib.autodoc_pydantic')} | \"\n f\"pydantic: {get_version_special('pydantic')} | \"\n f\"sphinx: {get_version('sphinx')} | \"\n f\"sphinx_rtd_theme: {get_version('sphinx_rtd_theme')} | \"\n f\"sphinx_tabs: {get_version('sphinx_tabs')}\")",
"def _display_help():\n if parameters[\"Command flavour\"] in (\"bsd\", \"bsd:freebsd\"):\n print(\"usage: ident [--debug] [--help|-?] [--version]\", file=sys.stderr)\n print(\" [-qV] [--] [file ...]\", file=sys.stderr)\n print(\n \" --------- ------------------------------------------\",\n file=sys.stderr\n )\n print(\" -q Quiet mode\", file=sys.stderr)\n print(\" -V Do nothing and exit\", file=sys.stderr)\n print(\" --debug Enable debug mode\", file=sys.stderr)\n print(\" --help|-? Print usage and this help message and exit\", file=sys.stderr)\n print(\" --version Print version and exit\", file=sys.stderr)\n print(\" -- Options processing terminator\", file=sys.stderr)\n else: # if parameters[\"Command flavour\"] in (\"PNU\", \"gnu\", \"gnu:linux\", \"linux\"):\n print(\"usage: ident [--debug] [--help|-?] [-V|--version]\", file=sys.stderr)\n print(\" [-q] [--] [file ...]\", file=sys.stderr)\n print(\n \" ------------ ------------------------------------------\",\n file=sys.stderr\n )\n print(\" -q Quiet mode\", file=sys.stderr)\n print(\" --debug Enable debug mode\", file=sys.stderr)\n print(\" --help|-? Print usage and this help message and exit\", file=sys.stderr)\n print(\" -V|--version Print version and exit\", file=sys.stderr)\n print(\" -- Options processing terminator\", file=sys.stderr)\n print(file=sys.stderr)",
"def test_version(self):\n result = check_output([b\"flocker-reportstate\"] + [b\"--version\"])\n self.assertEqual(result, b\"%s\\n\" % (__version__,))",
"def show_help(argv=None):\n if argv:\n if \"list_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"list_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ls\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"search_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"search_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm search <keyword>\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"show_mounts\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"show_mounts\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ps\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mount <dataset_name> [<mount_path>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mmount <dataset_name> [<dataset_name> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"unmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"unmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm unmount <mount_id> [<cleanup_flag>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"munmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"munmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm munmount <mount_id> [<mount_id> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"clean\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"clean\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm clean\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n else:\n sdm_util.print_message(\"Unrecognized command\")\n return 1\n else:\n sdm_util.print_message(\"command : sdm <COMMAND> [<COMMAND_SPECIFIC_ARGS> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(\"Available Commands\")\n\n tbl = PrettyTable()\n tbl.field_names = [\"COMMAND\", \"DESCRIPTION\"]\n for cmd in COMMANDS:\n command, _, desc = cmd\n command_str = \" | \".join(command)\n tbl.add_row([command_str, desc])\n\n sdm_util.print_message(tbl)\n sdm_util.print_message(\"\")\n return 0",
"def check_image_version(duthost):\n skip_release(duthost, per_command_check_skip_versions)",
"def test_version_display():\n output = subprocess.run(['smif', '-V'], stdout=subprocess.PIPE)\n assert smif.__version__ in str(output.stdout)",
"def __get_info(self, disk):\n # Checking the disk exists\n\tFNULL = open(os.devnull, 'w') # Defining /dev/null\n\tcheck = subprocess.call(['df', disk], stdout=FNULL,\\\n stderr=subprocess.STDOUT)\n # Disk not found\n if check != 0:\n sys.stdout.write(\"^fg(%s)!E! DiskNotFound^fg()\" % Colors.CL_BASE08)\n # Disk found\n else:\n # Executing command, parsing output and removing empty elements\n cmd = subprocess.Popen(['df', '-H', disk], stdout=subprocess.PIPE)\n cmd_out, cmd_err = cmd.communicate()\n cmd_outparsed = cmd_out.split(' ')\n cmd_outparsed = filter(None, cmd_outparsed)\n\n # Getting information\n disk_name = disk\n disk_size = cmd_outparsed[7]\n disk_used = cmd_outparsed[8]\n disk_available = cmd_outparsed[9]\n disk_percentage = cmd_outparsed[10].translate(None, \"%\")\n disk_percentage = int(disk_percentage)\n\n # Disk Name: ~\n sys.stdout.write(\"^fg(%s)[^fg()\" % Colors.CL_BASE02)\n if disk_name == DISK_DATA:\n sys.stdout.write(\"^fg(%s)~^fg()\" % Colors.CL_BASE0D)\n sys.stdout.write(\"^fg(%s): ^fg()\" % Colors.CL_BASE03)\n # Disk Name: /\n elif disk_name == DISK_ROOT:\n sys.stdout.write(\"^fg(%s)/^fg()\" % Colors.CL_BASE0D)\n sys.stdout.write(\"^fg(%s): ^fg()\" % Colors.CL_BASE03)\n \n # Disk Percentage: Good\n if 0 <= disk_percentage <= 60:\n sys.stdout.write(\"^fg(%s)%s%%^fg()\" % (Colors.CL_BASE0B, disk_percentage))\n sys.stdout.write(\"^fg(%s) %s^fg()\" % (Colors.CL_BASE0B, disk_used))\n sys.stdout.write(\"^fg(%s)/^fg()\" % Colors.CL_BASE03)\n # Disk Percentage: Fair\n elif 61 <= disk_percentage <= 90:\n sys.stdout.write(\"^fg(%s)%s%%^fg()\" % (Colors.CL_BASE09, disk_percentage))\n sys.stdout.write(\"^fg(%s) %s^fg()\" % (Colors.CL_BASE09, disk_used))\n sys.stdout.write(\"^fg(%s)/^fg()\" % Colors.CL_BASE03)\n # Disk Percentage: Weak\n elif 91 <= disk_percentage <= 100:\n sys.stdout.write(\"^fg(%s)%s%%^fg()\" % (Colors.CL_BASE08, disk_percentage))\n sys.stdout.write(\"^fg(%s) %s^fg()\" % (Colors.CL_BASE08, disk_used))\n sys.stdout.write(\"^fg(%s)/^fg()\" % Colors.CL_BASE03)\n sys.stdout.write(\"^fg(%s)%s^fg()\" % (Colors.CL_BASE0D, disk_size))\n sys.stdout.write(\"^fg(%s)]^fg()\" % Colors.CL_BASE02)",
"def check_os_version():\n if not version.is_supported_version():\n supported_releases = []\n for rel in version.SUPPORTED_VERSIONS:\n for ver in version.SUPPORTED_VERSIONS[rel]:\n supported_releases.append(rel.upper() + ' ' + ver)\n reporting.create_report([\n reporting.Title(\n 'The installed OS version is not supported for the in-place upgrade to the target RHEL version'\n ),\n reporting.Summary(\n 'The supported OS releases for the upgrade process:\\n'\n ' {}'.format('\\n'.join(supported_releases))\n ),\n reporting.Severity(reporting.Severity.HIGH),\n reporting.Groups(COMMON_REPORT_TAGS),\n reporting.Groups([reporting.Groups.INHIBITOR]),\n # we want to set a static Key here because of different Title per path\n reporting.Key('1c7a98849a747ec9890f04bf4321de7280970715')\n ] + related)",
"def ShowUpdatedDEPS(options, versions):\n for flavor, value in sorted(GetUpdatedDEPS(options, versions).iteritems()):\n keyname = HashKey(flavor)\n print '%s=%s' % (keyname, value)\n sys.stdout.flush()",
"def do_version(self, a):\n print(\"\\tversion: \" + (str(ise.getVersion())) +\n \".\" + (str(ise.getFirmware())))",
"def info():\n\n print('Maptool\\n--------\\n')\n print('Version: ' + __version__)\n print('Path: ' + __path__[0])\n print('Date: ' + __date__)\n print()\n\n import sys\n print('Python version=' + sys.version + '\\n')\n\n try:\n mm = __import__('pymongo')\n print('%10s %10s %s' % ('pymongo', mm.version, mm.__path__[0]))\n except ImportError:\n print('pymongo Not Found')\n\n for modui in ['numpy', 'scipy', 'mayavi', 'matplotlib', 'tqdm',\n 'future', 'nose', 'coverage', 'spglib', 'pyhull', 'pymatgen', 'qmpy', ]:\n try:\n mm = __import__(modui)\n print('%10s %10s %s' % (modui, mm.__version__, mm.__path__[0]))\n except ImportError:\n print('%10s %10s Not Found' % (modui, ''))\n\n if ASE:\n import ase\n #from ase import version as ase_version\n print('%10s %10s %s' % ('ase', ase.__version__, ase.__path__[0]))\n else:\n print('%10s %10s Not Found' % ('ase', ''))"
] | [
"0.61408764",
"0.60227036",
"0.5977592",
"0.59699047",
"0.5859191",
"0.5809201",
"0.5670012",
"0.56505746",
"0.56443745",
"0.5622813",
"0.5611506",
"0.5571046",
"0.55378354",
"0.5531627",
"0.54901385",
"0.54891825",
"0.54883015",
"0.54713386",
"0.54710495",
"0.5470672",
"0.5427179",
"0.5415402",
"0.54075515",
"0.5397546",
"0.5387158",
"0.53825396",
"0.5372509",
"0.53532195",
"0.5352779",
"0.53463846"
] | 0.6510692 | 0 |
Verify daos and libdaos interoperability between different version of agent and server. | def verify_daos_libdaos(self, step, hosts_client, cmd, positive_test, agent_server_ver,
                        exp_err=None):
    if positive_test:
        self.log.info("==(%s)Positive_test: %s, on %s", step, cmd, agent_server_ver)
    else:
        self.log.info("==(%s)Negative_test: %s, on %s", step, cmd, agent_server_ver)
    return1 = run_pcmd(hosts_client, cmd)
    if positive_test:
        if return1[0]['exit_status']:
            self.fail("##({0})Test failed, {1}, on {2}".format(step, cmd, agent_server_ver))
    else:
        self.log.info("-->return1= %s", return1)
        if not return1[0]['exit_status']:
            self.fail("##({0})Test failed, {1}, on {2}".format(step, cmd, agent_server_ver))
        if exp_err not in return1[0]['stdout'][0]:
            self.fail("##({0})Test failed, {1}, on {2}, expect_err {3} "
                      "not shown on stdout".format(step, cmd, agent_server_ver, exp_err))
    self.log.info("==(%s)Test passed, %s, on %s", step, cmd, agent_server_ver) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_upgrade_shared_dependencies(self):\n result = self.run_cli_command(\"-s\", \"upgrade\", cwd=self._get_cwd())\n assert result.exit_code == 0\n\n agent_config: AgentConfig = cast(\n AgentConfig,\n load_item_config(PackageType.AGENT.value, Path(self.current_agent_context)),\n )\n assert OefSearchMessage.protocol_id in agent_config.protocols\n assert SOEF_PUBLIC_ID in agent_config.connections\n assert OEF_PUBLIC_ID in agent_config.connections",
"def diff_versions_agent_server(self):\n # (1)Setup\n self.log.info(\"==(1)Setup, create pool and container.\")\n hosts_client = self.hostlist_clients\n hosts_server = self.hostlist_servers\n all_hosts = include_local_host(hosts_server | hosts_client)\n self.upgrade_repo = self.params.get(\"upgrade_repo\", '/run/interop/*')\n self.downgrade_repo = self.params.get(\"downgrade_repo\", '/run/interop/*')\n self.add_pool(connect=False)\n pool_id = self.pool.identifier\n self.add_container(self.pool)\n self.container.open()\n cmd = \"dmg system query\"\n positive_test = True\n negative_test = False\n agent_server_ver = \"2.0 agent to 2.0 server\"\n self.verify_daos_libdaos(\"1.1\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (2)dmg system stop\n self.log.info(\"==(2)Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n\n # (3)Upgrade 1 server-host to new\n self.log.info(\"==(3)Upgrade 1 server to 2.2.\")\n server = hosts_server[0:1]\n self.upgrade(server, [])\n self.log.info(\"==(3.1)server %s Upgrade to 2.2 completed.\", server)\n\n # (4)Negative test - dmg pool query on mix-version servers\n self.log.info(\"==(4)Negative test - dmg pool query on mix-version servers.\")\n agent_server_ver = \"2.0 agent, mix-version server-hosts\"\n cmd = \"dmg pool list\"\n exp_err = \"unable to contact the DAOS Management Service\"\n self.verify_daos_libdaos(\n \"4.1\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n\n # (5)Upgrade rest server-hosts to 2.2\n server = hosts_server[1:len(hosts_server)]\n self.log.info(\"==(5) Upgrade rest server %s to 2.2.\", server)\n self.upgrade(server, [])\n self.log.info(\"==(5.1) server %s Upgrade to 2.2 completed.\", server)\n\n # (6)Restart 2.0 agent\n self.log.info(\"==(6)Restart 2.0 agent\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (7)Verify 2.0 agent connect to 2.2 server\n self.log.info(\"==(7)Verify 2.0 agent connect to 2.2 server\")\n agent_server_ver = \"2.0 agent to 2.2 server\"\n cmd = \"daos pool query {0}\".format(pool_id)\n self.verify_daos_libdaos(\"7.1\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"dmg pool query {0}\".format(pool_id)\n exp_err = \"admin:0.0.0 are not compatible\"\n self.verify_daos_libdaos(\n \"7.2\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"7.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create {0} --type POSIX --properties 'rf:2'\".format(pool_id)\n self.verify_daos_libdaos(\"7.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos pool autotest --pool {0}\".format(pool_id)\n self.verify_daos_libdaos(\"7.5\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (8)Upgrade agent to 2.2\n self.log.info(\"==(8)Upgrade agent to 2.2, now 2.2 servers 2.2 agent.\")\n self.upgrade([], hosts_client)\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (9)Pool and containers create on 2.2 agent and server\n self.log.info(\"==(9)Create new pools and containers on 2.2 agent to 2.2 server\")\n agent_server_ver = \"2.2 agent to 2.2 server\"\n cmd = \"dmg pool create --size 5G New_pool1\"\n self.verify_daos_libdaos(\"9.1\", hosts_client, cmd, positive_test, 
agent_server_ver)\n cmd = \"dmg pool list\"\n self.verify_daos_libdaos(\"9.2\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 C21 --type POSIX --properties 'rf:2'\"\n self.verify_daos_libdaos(\"9.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 C22 --type POSIX --properties 'rf:2'\"\n self.verify_daos_libdaos(\"9.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos container list New_pool1\"\n self.verify_daos_libdaos(\"9.5\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"9.6\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos pool autotest --pool New_pool1\"\n self.verify_daos_libdaos(\"9.7\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (10)Downgrade server to 2.0\n self.log.info(\"==(10)Downgrade server to 2.0, now 2.2 agent to 2.0 server.\")\n self.log.info(\"==(10.1)Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n self.log.info(\"==(10.2)Downgrade server to 2.0\")\n self.downgrade(hosts_server, [])\n self.log.info(\"==(10.3)Restart 2.0 agent\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (11)Verify 2.2 agent to 2.0 server\n agent_server_ver = \"2.2 agent to 2.0 server\"\n cmd = \"daos pool query {0}\".format(pool_id)\n self.verify_daos_libdaos(\"11.1\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"dmg pool query {0}\".format(pool_id)\n exp_err = \"does not match\"\n self.verify_daos_libdaos(\n \"11.2\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"11.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create {0} 'C_oldP' --type POSIX --properties 'rf:2'\".format(\n pool_id)\n self.verify_daos_libdaos(\"11.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 'C_newP' --type POSIX --properties 'rf:2'\"\n exp_err = \"DER_NO_SERVICE(-2039)\"\n self.verify_daos_libdaos(\n \"11.5\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n exp_err = \"common ERR\"\n cmd = \"daos pool autotest --pool {0}\".format(pool_id)\n self.verify_daos_libdaos(\n \"11.6\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n\n # (12)Downgrade agent to 2.0\n self.log.info(\"==(12)Agent %s Downgrade started.\", hosts_client)\n self.downgrade([], hosts_client)\n self.log.info(\"==Test passed\")",
"def verify_support():\n ostype, majorrelease, _ = get_os_release_data()\n if ostype not in _supported_os:\n _logger.info('OS type %s is not supported.', ostype)\n return False\n if majorrelease not in _supported_release:\n _logger.info('OS %s %s is not supported', ostype, majorrelease)\n return False\n return True",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_non_vendor_nothing_to_upgrade(\n self, *mocks\n ): # pylint: disable=unused-argument\n with cd(self.agent_name):\n base_agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n\n self.runner.invoke( # pylint: disable=no-member\n cli,\n [\"--skip-consistency-check\", \"upgrade\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n assert base_agent_items == agent_items",
"def dependencies_met():\n # Check Java VM command line runner.\n try:\n Popen(['java'], shell=False, stderr=PIPE).communicate()[1]\n except:\n print 'Dependecy unmet. Java virtual machine command line runner not ' \\\n 'found.'\n return False\n # Check selenium-server.jar is ready to run.\n output = Popen(('java -jar %s -unrecognized_argument' % SELENIUM_RC_PATH\n ).split(), shell=False, stderr=PIPE).communicate()[1]\n if not re.search('Usage: java -jar selenium-server.jar', output):\n print 'Dependecy unmet. Selenium RC server (selenium-server.jar) not ' \\\n 'found.'\n return False\n # Check selenium RC python driver is available.\n try:\n import selenium\n except:\n print 'Dependecy unmet. Selenium RC python driver (selenium.py) not ' \\\n 'found.'\n return False\n # Check CherryPy wsgi server is available.\n try:\n import wsgiserver\n except:\n print 'Dependecy unmet. CherryPy wsgi server (wsgiserver.py) not found.'\n return False\n # Check fixture support is implemented for the database engine.\n if not settings.DATABASE_ENGINE in ['sqlite3', 'postgresql_psycopg2']:\n print 'Dependecy unmet. Fixture support for database engine %s not ' \\\n 'implemented.' % settings.DATABASE_ENGINE\n return False\n return True",
"def test_upgrade(self):\n with cd(self.latest_agent_name):\n latest_agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n\n with cd(self.agent_name):\n self.runner.invoke( # pylint: disable=no-member\n cli,\n [\"--skip-consistency-check\", \"upgrade\", \"--local\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n assert latest_agent_items == agent_items\n\n # upgrade again to check it workd with upgraded version\n with cd(self.agent_name):\n self.runner.invoke( # pylint: disable=no-member\n cli,\n [\"--skip-consistency-check\", \"upgrade\", \"--local\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n assert latest_agent_items == agent_items\n\n # compare both configuration files, except the agent name and the author\n upgraded_agent_dir = Path(self.agent_name)\n latest_agent_dir = Path(self.latest_agent_name)\n lines_upgraded_agent_config = (\n (upgraded_agent_dir / DEFAULT_AEA_CONFIG_FILE).read_text().splitlines()\n )\n lines_latest_agent_config = (\n (latest_agent_dir / DEFAULT_AEA_CONFIG_FILE).read_text().splitlines()\n )\n # the slice is because we don't compare the agent name and the author name\n assert lines_upgraded_agent_config[2:] == lines_latest_agent_config[2:]\n\n # compare vendor folders.\n assert are_dirs_equal(\n upgraded_agent_dir / \"vendor\", latest_agent_dir / \"vendor\"\n )",
"def test_verify_connection_to_a_device():",
"def check_stability(self):",
"def test_denoiser_supported_version(self):\r\n\r\n pass_test = True\r\n try:\r\n check_flowgram_ali_exe()\r\n except (ApplicationNotFoundError, ApplicationError):\r\n pass_test = False\r\n\r\n self.assertTrue(pass_test,\r\n \"Denoiser flowgram aligner not found or not \"\r\n \"executable. This may or may not be a problem \"\r\n \"depending on which components of QIIME you plan to \"\r\n \"use.\")",
"def verify_client_run(self, exp_iface, env):\n hfi_map = {\"ib0\": \"hfi1_0\", \"ib1\": \"hfi1_1\"}\n\n # Get counter values for hfi devices before and after\n cnt_before = self.get_port_cnt(\n self.hostlist_clients, hfi_map[exp_iface], \"port_rcv_data\")\n\n # get the dmg config file for daos_racer\n dmg = self.get_dmg_command()\n\n # Let's run daos_racer as a client\n daos_racer = DaosRacerCommand(self.bin,\n self.hostlist_clients[0], dmg)\n daos_racer.get_params(self)\n\n # Update env_name list to add OFI_INTERFACE if needed.\n if env:\n daos_racer.update_env_names([\"OFI_INTERFACE\"])\n\n # Setup the environment and logfile\n logf = \"daos_racer_{}_{}.log\".format(exp_iface, env)\n\n # Add FI_LOG_LEVEL to get more info on device issues\n racer_env = daos_racer.get_environment(self.server_managers[0], logf)\n racer_env[\"FI_LOG_LEVEL\"] = \"info\"\n racer_env[\"D_LOG_MASK\"] = \"INFO,object=ERR,placement=ERR\"\n daos_racer.set_environment(racer_env)\n\n # Run client\n daos_racer.run()\n\n # Verify output and port count to check what iface CaRT init with.\n cnt_after = self.get_port_cnt(\n self.hostlist_clients, hfi_map[exp_iface], \"port_rcv_data\")\n\n diff = 0\n for cnt_b, cnt_a in zip(cnt_before.values(), cnt_after.values()):\n diff = int(cnt_a) - int(cnt_b)\n self.log.info(\"Port [%s] count difference: %s\", exp_iface, diff)\n\n # Read daos.log to verify device used and prevent false positives\n self.assertTrue(\n self.get_log_info(\n self.hostlist_clients, exp_iface, env, get_log_file(logf)))\n\n # If we don't see data going through the device, fail\n status = True\n if diff <= 0:\n self.log.info(\"No traffic seen through device: %s\", exp_iface)\n status = False\n else:\n status = True\n return status",
"def test_unix_client_system_connection(core_session, agent_enrolled_unix_system_with_users, proxy_start_stop):\n\n \"\"\"\n Testrail Link:\n https://testrail.centrify.com/index.php?/cases/view/1293084\n https://testrail.centrify.com/index.php?/cases/view/1293085\n https://testrail.centrify.com/index.php?/cases/view/1293086\n \"\"\"\n\n # verfiy the test is run with single thread.\n assert 'PYTEST_XDIST_WORKER_COUNT' not in os.environ, \\\n f'This test cannot be run with multiple threads due to starting and stopping connectors'\n\n enrolledsystems = agent_enrolled_unix_system_with_users\n session = enrolledsystems[0][\"Session\"]\n resourceid = enrolledsystems[0][\"ResourceId\"]\n proxyid = enrolledsystems[0][\"ProxyId\"]\n proxycontrol = proxy_start_stop\n\n logger.info(\"stop the agent\")\n ssh_manager.ssh_stop_agent(session)\n logger.info(\"start the connector\")\n proxycontrol(proxyid, True)\n\n logger.info(\"Testing connection to the computer, Connector is ready\")\n result, success = ResourceManager.get_system_health(core_session, resourceid)\n assert success and result == 'OK', f\"Unable to verify system is reachable {result} {success}\"\n\n # stop Conector , Should fail\n logger.info(\"Stopping the connector\")\n proxycontrol(proxyid, False)\n logger.info(\"Testing connection to the system\")\n result, success = ResourceManager.get_system_health(core_session, resourceid)\n assert success and result != 'OK', f\"cerify system is reachable {result} {success}\"\n\n # Start agent\n logger.info(\"Starting the agent\")\n ssh_manager.ssh_start_agent(session, True)\n logger.info(\"Testing connection to the computer, agent is available.\")\n result, success = ResourceManager.get_system_health(core_session, resourceid)\n assert success and result == 'OK', f\"Unable to verify system is reachable {result} {success}\"\n\n # verify account again, both connector and agent are running \n proxycontrol(proxyid, True)\n logger.info(\"Testing connection to the computer, both agent and connector are available\")\n result, success = ResourceManager.get_system_health(core_session, resourceid)\n assert success and result == 'OK', f\"Unable to verify system is reachable {result} {success}\"",
"async def test_dbus_osagent(\n os_agent_service: OSAgentService, dbus_session_bus: MessageBus\n):\n os_agent = OSAgent()\n\n assert os_agent.version is None\n assert os_agent.diagnostics is None\n\n await os_agent.connect(dbus_session_bus)\n\n assert os_agent.version == \"1.1.0\"\n assert os_agent.diagnostics\n\n os_agent_service.emit_properties_changed({\"Diagnostics\": False})\n await os_agent_service.ping()\n assert os_agent.diagnostics is False\n\n os_agent_service.emit_properties_changed({}, [\"Diagnostics\"])\n await os_agent_service.ping()\n await os_agent_service.ping()\n assert os_agent.diagnostics is True",
"def _check_compat(sock_info):\n ...",
"def version_check(self):\n param_name = \"rethink/software_version\"\n sdk_version = settings.SDK_VERSION\n\n # get local lock for rosparam threading bug\n with self.__class__.param_lock:\n robot_version = rospy.get_param(param_name, None)\n if not robot_version:\n rospy.logwarn(\"RobotEnable: Failed to retrieve robot version \"\n \"from rosparam: %s\\n\"\n \"Verify robot state and connectivity \"\n \"(i.e. ROS_MASTER_URI)\", param_name)\n return False\n else:\n # parse out first 3 digits of robot version tag\n pattern = (\"^([0-9]+)\\.([0-9]+)\\.([0-9]+)\")\n match = re.search(pattern, robot_version)\n if not match:\n rospy.logwarn(\"RobotEnable: Invalid robot version: %s\",\n robot_version)\n return False\n robot_version = match.string[match.start(1):match.end(3)]\n if robot_version not in settings.VERSIONS_SDK2ROBOT[sdk_version]:\n errstr_version = \"\"\"RobotEnable: Software Version Mismatch.\nRobot Software version (%s) does not match local SDK version (%s). Please\nUpdate your Robot Software. \\\nSee: http://sdk.rethinkrobotics.com/wiki/Software_Update\"\"\"\n rospy.logerr(errstr_version, robot_version, sdk_version)\n return False\n return True"
] | [
"0.6237617",
"0.61974806",
"0.5944649",
"0.58433425",
"0.58433425",
"0.58433425",
"0.58433425",
"0.58433425",
"0.58433425",
"0.58433425",
"0.58433425",
"0.58433425",
"0.58433425",
"0.58433425",
"0.58433425",
"0.58433425",
"0.58433425",
"0.58433425",
"0.58433425",
"0.56415945",
"0.56348205",
"0.5612916",
"0.55960155",
"0.5582666",
"0.55502415",
"0.5541414",
"0.553012",
"0.55223554",
"0.5500364",
"0.54910874"
] | 0.6391706 | 0 |
Check if RPMs with faultinjection function. | def has_fault_injection(self, hosts):
    status = True
    result = run_pcmd(hosts, "daos_debug_set_params -v 67174515")
    self.log.info("--check_result, result= %s", result)
    if result[0]['stdout'] == []:
        self.log.info("#Host client rpms did not have fault-injection")
        status = False
    return status | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _isfault(self):\n return self.dp.state()==PyTango.DevState.FAULT",
"def check_page_faults(con, host, warning, critical,perf_data):\n warning = warning or 10\n critical = critical or 30\n data=get_server_status(con)\n\n try:\n page_faults=float(data['extra_info']['page_faults']) \n except:\n # page_faults unsupported on the underlaying system\n return exit_with_general_critical(\"page_faults unsupported on the underlaying system\")\n \n err,delta=maintain_delta([page_faults],host,\"page_faults\")\n if err==0:\n page_faults_ps=delta[1]/delta[0]\n message = \"Page faults : %.2f ps\" % page_faults_ps\n message+=performance_data(perf_data,[(\"%.2f\" %page_faults_ps,\"page_faults_ps\",warning,critical)])\n return check_levels(page_faults_ps,warning,critical,message)\n else:\n return exit_with_general_warning(\"problem reading data from temp file\")",
"def check_vulnerability(self):\n\t\tpass",
"def is_faulty(self, event):\n for each in self._faults:\n if each.name.upper() == event.name.upper():\n return True\n return False",
"def check():",
"def check_rpm_uninstall(self, rpm_package_name):\n with self.assertRaisesRegexp(ExecutionError, \"%s is not installed\" % rpm_package_name):\n run_command(\"rpm -q %s --dbpath %s\" % (rpm_package_name, RPM_DATABASE))",
"def _validate_pf(module):\n rc, out, err = module.run_command(['ls', '/sbin/pfctl'])\n\n # Validate exit code\n if rc != 0:\n msg_err = 'Error: Unable to find pfctl binary.'\n module.fail_json(msg=msg_err)",
"def panic_on_fault_enabled(self):\n # The panic_on_fault mechanism might not even be included in the build\n # (in which case the panic_on_fault variables won't exist), so be defensive.\n try:\n enabled = self.chipdata.get_var_strict(\n 'L_panic_on_fault_enabled'\n ).value\n fault_id = self.chipdata.get_var_strict(\n 'L_panic_on_fault_id'\n ).value\n except ct.DebugInfoNoVariable:\n enabled = False\n fault_id = 0\n return (enabled, fault_id)",
"def check_install():\n if platform.dist()[0] not in ['fedora', 'redhat', 'centos']:\n print \"{} not supported\".format(platform.dist()[0])\n sys.exit(1)\n print \"\\ndetected {} {} ...\".format(platform.dist()[0], platform.dist()[1])\n\n import yum\n # Remove loggin. Taken from: https://stackoverflow.com/a/46716482\n from yum.logginglevels import __NO_LOGGING\n yumloggers = [\n 'yum.filelogging.RPMInstallCallback', 'yum.verbose.Repos',\n 'yum.verbose.plugin', 'yum.Depsolve', 'yum.verbose', 'yum.plugin',\n 'yum.Repos', 'yum', 'yum.verbose.YumBase', 'yum.filelogging',\n 'yum.verbose.YumPlugins', 'yum.RepoStorage', 'yum.YumBase',\n 'yum.filelogging.YumBase', 'yum.verbose.Depsolve'\n ]\n for loggername in yumloggers:\n logger = logging.getLogger(loggername)\n logger.setLevel(__NO_LOGGING)\n\n yumbase = yum.YumBase()\n pkg = 'Percona-XtraDB-Cluster-server-<%= @percona_major_version %>'\n if yumbase.rpmdb.searchNevra(name=pkg):\n pkg_list = yumbase.rpmdb.searchNevra(name=pkg)\n print 'detected {} ...'.format(pkg_list[0])\n else:\n print \"{}{} not installed{}\".format(RED, pkg, WHITE)\n sys.exit(1)\n return 'percona'",
"def check_for_exposed(context):\n json_data = context.response.json()\n if \"exploitable_vulnerabilities_count\" in json_data:\n raise Exception(\"Field exploitable_vulnerabilities_count Exposed in\"\n \" Free user result\")\n if \"vendor_package_link\" in json_data:\n raise Exception(\"Field vendor_package_link has been exposed for free user\")",
"def try_mitigate_issues_if_any(self, command, code, out):\n if \"Error\" in out or \"Errno\" in out:\n issue_mitigated = self.check_known_issues_and_attempt_fix(out)\n if issue_mitigated:\n self.composite_logger.log_debug('\\nPost mitigation, invoking package manager again using: ' + command)\n code_after_fix_attempt, out_after_fix_attempt = self.env_layer.run_command_output(command, False, False)\n return self.try_mitigate_issues_if_any(command, code_after_fix_attempt, out_after_fix_attempt)\n return code, out",
"def ignore_silently(self):\n return self.fault_code in (17, 33, 48, 49)",
"def do_processes_require_restart(self):\n self.composite_logger.log_debug(\"Checking if process requires reboot\")\n # Checking using yum-utils\n self.composite_logger.log_debug(\"Ensuring yum-utils is present.\")\n code, out = self.env_layer.run_command_output(self.yum_utils_prerequisite, False, False) # idempotent, doesn't install if already present\n self.composite_logger.log_debug(\" - Code: \" + str(code) + \", Output: \\n|\\t\" + \"\\n|\\t\".join(out.splitlines()))\n\n # Checking for restart for distros with -r flag such as RHEL 7+\n code, out = self.env_layer.run_command_output(self.needs_restarting_with_flag, False, False)\n self.composite_logger.log_debug(\" - Code: \" + str(code) + \", Output: \\n|\\t\" + \"\\n|\\t\".join(out.splitlines()))\n if out.find(\"Reboot is required\") < 0:\n self.composite_logger.log_debug(\" - Reboot not detected to be required (L1).\")\n else:\n self.composite_logger.log_debug(\" - Reboot is detected to be required (L1).\")\n return True\n\n # Checking for restart for distro without -r flag such as RHEL 6 and CentOS 6\n if str(self.env_layer.platform.linux_distribution()[1]).split('.')[0] == '6':\n code, out = self.env_layer.run_command_output(self.needs_restarting, False, False)\n self.composite_logger.log_debug(\" - Code: \" + str(code) + \", Output: \\n|\\t\" + \"\\n|\\t\".join(out.splitlines()))\n if len(out.strip()) == 0 and code == 0:\n self.composite_logger.log_debug(\" - Reboot not detected to be required (L2).\")\n else:\n self.composite_logger.log_debug(\" - Reboot is detected to be required (L2).\")\n return True\n\n # Double-checking using yum ps (where available)\n self.composite_logger.log_debug(\"Ensuring yum-plugin-ps is present.\")\n code, out = self.env_layer.run_command_output(self.yum_ps_prerequisite, False, False) # idempotent, doesn't install if already present\n self.composite_logger.log_debug(\" - Code: \" + str(code) + \", Output: \\n|\\t\" + \"\\n|\\t\".join(out.splitlines()))\n\n output = self.invoke_package_manager(self.yum_ps)\n lines = output.strip().split('\\n')\n\n process_list_flag = False\n process_count = 0\n process_list_verbose = \"\"\n\n for line in lines:\n if not process_list_flag: # keep going until the process list starts\n if line.find(\"pid\") < 0 and line.find(\"proc\") < 0 and line.find(\"uptime\") < 0:\n self.composite_logger.log_debug(\" - Inapplicable line: \" + str(line))\n continue\n else:\n self.composite_logger.log_debug(\" - Process list started: \" + str(line))\n process_list_flag = True\n continue\n\n process_details = re.split(r'\\s+', line.strip())\n if len(process_details) < 7:\n self.composite_logger.log_debug(\" - Inapplicable line: \" + str(line))\n continue\n else:\n # The first string should be process ID and hence it should be integer.\n # If first string is not process ID then the line is not for a process detail.\n try:\n int(process_details[0])\n except Exception:\n self.composite_logger.log_debug(\" - Inapplicable line: \" + str(line))\n continue\n\n self.composite_logger.log_debug(\" - Applicable line: \" + str(line))\n process_count += 1\n process_list_verbose += process_details[1] + \" (\" + process_details[0] + \"), \" # process name and id\n\n self.composite_logger.log(\" - Processes requiring restart (\" + str(process_count) + \"): [\" + process_list_verbose + \"<eol>]\")\n return process_count != 0 # True if there were any",
"def check_remote_rpm_uninstall(self, rpm_package_name, host):\n with self.assertRaisesRegexp(ExecutionError, \"%s is not installed\" % rpm_package_name):\n results = run_remote_command(\"rpm -q %s --dbpath %s\" % (rpm_package_name, RPM_DATABASE), host)",
"def test_isort_tool_plugin_scan_oserror(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = OSError(\"mocked error\")\n itp = setup_isort_tool_plugin()\n package = Package(\n \"valid_package\", os.path.join(os.path.dirname(__file__), \"valid_package\")\n )\n package[\"python_src\"] = [\n os.path.join(os.path.dirname(__file__), \"valid_package\", \"sample.py\")\n ]\n issues = itp.scan(package, \"level\")\n assert not issues",
"def test_10_unsupported_actions(self):\n\n def __count_pulled_packages(pth):\n self.pkgrepo(\"list -F tsv -H -s {0}\".format(pth))\n return len(self.output.splitlines())\n\n def __check_errout(pfmri):\n s1 = \"invalid action in package {0}\".format(pfmri)\n s2 = \"Malformed action in package '{0}'\".format(pfmri)\n self.assert_(s1 in self.errout or s2 in self.errout,\n \"{0} not in error\".format(pfmri))\n\n def __empty_repo(uri, arg_string):\n if uri.startswith(\"http://\"):\n rurl = self.dcs[4].get_repo_url()\n self.pkgrepo(\"remove -s {0} '*'\".format(rurl))\n # Refresh the depot to get it to realize that\n # the catalog has changed.\n self.dcs[4].refresh()\n elif arg_string:\n portable.remove(uri)\n else:\n self.pkgrepo(\"remove -s {0} '*'\".format(uri))\n\n\n def __test_rec(duri, arg_string, pfmris):\n self.debug(\"\\n\\nNow pkgrecv'ing to {0}\".format(duri))\n\n # It's necessary to use the -D option below because\n # otherwise pkgrecv will fail because the manifest\n # doesn't validate.\n\n novalidate = \"-D manifest_validate=Never \"\n # Check that invalid action attributes don't cause\n # tracebacks.\n self.pkgrecv(self.durl1, novalidate +\n \"-d {0} {1} {2}\".format(duri, arg_string,\n \" \".join(pfmris)), exit=pkgdefs.EXIT_OOPS)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri), 0)\n if arg_string:\n portable.remove(duri)\n\n self.pkgrecv(self.rurl1, novalidate +\n \"-d {0} {1} {2}\".format(duri, arg_string,\n \" \".join(pfmris)), exit=pkgdefs.EXIT_OOPS)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri), 0)\n if arg_string:\n portable.remove(duri)\n\n # Check that other packages are retrieved and the exit\n # code reflects partial success.\n self.pkgrecv(self.durl1, novalidate +\n \"-d {0} {1} -m all-timestamps '*'\".format(\n duri, arg_string), exit=pkgdefs.EXIT_PARTIAL)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri),\n len(self.published) - len(pfmris))\n __empty_repo(duri, arg_string)\n\n self.pkgrecv(self.rurl1, novalidate +\n \"-d {0} {1} -m all-timestamps '*'\".format(\n duri, arg_string), exit=pkgdefs.EXIT_PARTIAL)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri),\n len(self.published) - len(pfmris))\n __empty_repo(duri, arg_string)\n\n self.rurl1 = self.dcs[1].get_repo_url()\n repo = self.dcs[1].get_repo()\n rd = repo.get_pub_rstore()\n pfmri = fmri.PkgFmri(self.published[4])\n mp = rd.manifest(pfmri)\n\n with open(mp, \"rb\") as fh:\n original_txt = fh.read()\n txt = original_txt.replace(\"type=require\", \"type=foo\")\n with open(mp, \"wb\") as fh:\n fh.write(txt)\n\n rpth = tempfile.mkdtemp(dir=self.test_root)\n self.pkgrepo(\"create {0}\".format(rpth))\n adir = tempfile.mkdtemp(dir=self.test_root)\n\n # The __empty repo function above assumes that the only http uri\n # used is the one for depot number 4.\n dest_uris = ((rpth, \"\"), (self.durl4, \"\"),\n (os.path.join(adir, \"archive.p5p\"), \"-a\"))\n for duri, arg_string in dest_uris:\n __test_rec(duri, arg_string, [self.published[4]])\n\n # Test that multiple packages failing are handled correctly.\n for i in range(5, 7):\n pfmri = fmri.PkgFmri(self.published[i])\n mp = rd.manifest(pfmri)\n with open(mp, \"rb\") as fh:\n original_txt = fh.read()\n txt = \"foop\\n\" + original_txt\n with open(mp, \"wb\") as fh:\n fh.write(txt)\n\n for duri, arg_string, in dest_uris:\n __test_rec(duri, arg_string, self.published[4:7])",
"def check_reboot():\n return os.path.exist(\"run/reboot-required\")",
"def test_check_system_python_api(self):\n\n errors, successes = check_system.check_system()\n self.assertTrue(len(errors) + len(successes) >= 4)",
"def check_for_setup_error(self):\n lcfg = self.configuration\n\n self.zfssa.verify_pool(lcfg.zfssa_nfs_pool)\n self.zfssa.verify_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project)\n self.zfssa.verify_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,\n lcfg.zfssa_nfs_share)\n self.zfssa.verify_service('http')\n self.zfssa.verify_service('nfs')",
"def error_check(command):\r\n\r\n # TODO\r",
"def has_scan_errors(digest, pid, api_key):\n url = \"https://connect.redhat.com/api/v2/container/{}/certResults/{}\".format(pid, digest)\n headers = {\"accept\": \"*/*\",\n \"Authorization\": \"Bearer {}\".format(api_key)}\n\n # Wait for HTTP 200 Response from the certification results endpoint\n response = None\n while response is None:\n intermediate_response = requests.get(url=url, headers=headers)\n if intermediate_response.status_code == 200:\n response = intermediate_response\n else:\n time.sleep(5)\n\n # Iterate over scan results, print them and search for erroneous states\n scan_err_present = False\n print(\"Scan of image '{}' complete.\".format(digest))\n for requirement, check_result in response.json()[\"data\"][\"results\"].items():\n print(\"{}: {}\".format(requirement, check_result))\n if not check_result:\n scan_err_present = True\n\n if scan_err_present:\n print(\"Scan errors found. Please address the issues and push the image again.\")\n\n return scan_err_present",
"def check_errors(self) -> None:",
"def handle_xmlfault(*params):\n\n def check_xmlfault(f):\n\n def protected_f(*args, **kwds):\n try:\n ret = f(*args, **kwds)\n except xmlrpclib.Fault, e:\n # rpc does not know Exceptions so they always come as pure\n # strings. One way would be to hack into the de-marshalling.\n # These seems easier and less intrusive.\n match = XMLFAULT.match(e.faultString)\n if match is None:\n raise\n else:\n groups = match.groupdict()\n cls = groups['class']\n fault = groups['fault']\n if cls == 'cpyrit.storage.DigestError':\n raise DigestError(fault)\n elif cls == 'cpyrit.storage.StorageError':\n raise StorageError(fault)\n else:\n raise\n return ret\n protected_f.func_name = f.func_name\n protected_f.__doc__ = f.__doc__\n return protected_f\n return check_xmlfault",
"def test_bad_func(self):\n\n mock_entry_badfunc = mock.create_autospec(EntryPoint)\n mock_entry_badfunc.name = \"BadFunc\"\n mock_entry_badfunc.load = self.returnbadfunc\n\n with pytest.warns(AstropyUserWarning, match=r\".*Class.*\"):\n populate_entry_points([mock_entry_badfunc])",
"def data_checker(xml):\n if not xml or 'response code=\"102\"' in xml:\n LOGGER.debug(\"The service 'oclc' is temporarily down!\")\n return False\n return True",
"def pre_upgrade_checks(self):\n\n #HostOverview\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST OVERVIEW\")\n Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Ambari version\\t\\t:{0}\".format(self.ambari_version))\n\n #Check OS\n os = platform.dist()\n if os[1] != None:\n Logger.info(\"Operating System\\t\\t:{0} {1} - {2}\".format(os[0],os[1],os[2]))\n else:\n Logger.error(\"Unable to fetch OS details.\")\n self.terminate()\n return\n\n self.check_java_version()\n self.check_exactly_one_current_version()\n\n\n #Check if rack awareness is enabled ?\n rack_awareness = \"SELECT DISTINCT rack_info FROM hosts WHERE rack_info!='/default-rack';\"\n self.cursor.execute(rack_awareness)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.info(\"Rack Awareness ?\\t\\tNo\\n\")\n else:\n Logger.info(\"Rack Awareness ?\\t\\tYes\\n\")\n\n #Security Overview\n self.check_security()\n\n #Check High Availability configuration\n self.check_high_availability()\n\n #Check Metastores\n self.check_metastore()",
"def check_reboot():\n return os.path.exists(\"/run/reboot-required\")",
"def test_isort_tool_plugin_scan_calledprocesserror(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = subprocess.CalledProcessError(\n 0, \"\", output=\"mocked error\"\n )\n itp = setup_isort_tool_plugin()\n package = Package(\n \"valid_package\", os.path.join(os.path.dirname(__file__), \"valid_package\")\n )\n package[\"python_src\"] = [\n os.path.join(os.path.dirname(__file__), \"valid_package\", \"sample.py\")\n ]\n issues = itp.scan(package, \"level\")\n assert len(issues) == 1",
"def _check_vmware():\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'vmware-vmx':\n raise CommandError('S2E uses KVM to build images. VMware '\n 'is currently running, which is not '\n 'compatible with KVM. Please close all '\n 'VMware VMs and try again.')\n except NoSuchProcess:\n pass",
"def __verify__(cls):\n\n try:\n UpstartSystem()\n return True\n except Exception as e:\n try:\n UpstartSystem(bus=DirectUpstartBus())\n return True\n except Exception as e:\n return False"
] | [
"0.5820415",
"0.5700631",
"0.5693373",
"0.5601475",
"0.5494928",
"0.5375606",
"0.5362572",
"0.5336421",
"0.5294676",
"0.52919555",
"0.52540994",
"0.5233826",
"0.5226672",
"0.52215457",
"0.52044386",
"0.5198198",
"0.5196629",
"0.5195433",
"0.5191071",
"0.51856494",
"0.51808774",
"0.51713336",
"0.5169687",
"0.51534456",
"0.51214355",
"0.5118618",
"0.5113389",
"0.5098093",
"0.5085137",
"0.5084511"
] | 0.66173726 | 0 |
Enable and disable fault injection. | def enable_disable_fault_injection(self, hosts, enable=True):
if enable:
result = run_pcmd(hosts, "daos_debug_set_params -v 67174515")
else:
result = run_pcmd(hosts, "daos_debug_set_params -v 67108864")
self.check_result(result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fault_debug(value: bool = False) -> None:",
"def firewallOff():\n pass",
"def panic_on_fault_enabled(self):\n # The panic_on_fault mechanism might not even be included in the build\n # (in which case the panic_on_fault variables won't exist), so be defensive.\n try:\n enabled = self.chipdata.get_var_strict(\n 'L_panic_on_fault_enabled'\n ).value\n fault_id = self.chipdata.get_var_strict(\n 'L_panic_on_fault_id'\n ).value\n except ct.DebugInfoNoVariable:\n enabled = False\n fault_id = 0\n return (enabled, fault_id)",
"def disabled(config):\n disable(config)\n reload_service('apache2')",
"def _disable(self):\n self.debug_log(\"Disabling...\")\n self._unregister_handlers()",
"def disable_discovery(self):",
"def _injectFault(self, regName, regAddress, mask):\n try:\n self.target.halt()\n current = self.target.readMemory(regAddress)\n logging.debug(\"Current {}: 0x{:X}\".format(regName,current))\n newContent = current ^ mask\n logging.debug(\"New content to be written: 0x%X\" % newContent)\n self.target.writeMemory(regAddress, newContent)\n logging.log(25, \"Successfully injected fault into %s\" % regName)\n except Exception as e:\n logging.error(\"Failed to inject a fault into {}!\\n{}\".format(regName,e))\n finally:\n self.target.resume()",
"def ignore_silently(self):\n return self.fault_code in (17, 33, 48, 49)",
"def disable(self):\n self.registrar.unregister_service(\"say\", namespace=__name__)",
"def fault():\n return FaultCohesiveKin()",
"def disable(self):\n self.registrar.unregister_service(\"map\", namespace=__name__)\n self.registrar.unregister_service(\"directions\", namespace=__name__)",
"def toggleCounterEnable(self):\n mask = 1\n self._injectFault(\"PWM1TCR\", self.TCR, mask)",
"def off_hook(self) -> None:",
"def _isfault(self):\n return self.dp.state()==PyTango.DevState.FAULT",
"def __enable__(self) -> None:\n pass",
"def disable(self):\n LOGGER.info('Disabling WebAPI plugin WEB ...')",
"def _extend_fault_map(self):\n faults.FAULT_MAP.update({nsx_lib_exc.ManagerError:\n webob.exc.HTTPBadRequest,\n nsx_lib_exc.ServiceClusterUnavailable:\n webob.exc.HTTPServiceUnavailable,\n nsx_lib_exc.ClientCertificateNotTrusted:\n webob.exc.HTTPBadRequest,\n nsx_exc.SecurityGroupMaximumCapacityReached:\n webob.exc.HTTPBadRequest,\n nsx_lib_exc.NsxLibInvalidInput:\n webob.exc.HTTPBadRequest,\n nsx_exc.NsxENSPortSecurity:\n webob.exc.HTTPBadRequest,\n nsx_exc.NsxPluginTemporaryError:\n webob.exc.HTTPServiceUnavailable\n })",
"def __disable__(self) -> None:\n pass",
"def disable(ctx):\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'disable'\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", \"FLOW_CNT_TRAP\", fc_info)",
"def _enable(self):\n self.debug_log(\"Enabling...\")\n self._register_handlers()",
"def device_bypass(self, device_ids, enable):\n return self._device_action(device_ids, \"BYPASS\", self._action_toggle(enable))",
"def disable():\n if _status_apf():\n return __apf_cmd(\"-f\")",
"def disable(self) -> None:",
"def disable(self):\n self.error_code = 'DISABLED'\n self.running = False",
"def has_fault_injection(self, hosts):\n status = True\n result = run_pcmd(hosts, \"daos_debug_set_params -v 67174515\")\n self.log.info(\"--check_result, result= %s\", result)\n if result[0]['stdout'] == []:\n self.log.info(\"#Host client rpms did not have fault-injection\")\n status = False\n return status",
"def disable(self):",
"def disable_irq() -> int:",
"def _add_faults(self, feature_builder, features=None):\n if features is None:\n features = self.features\n for f in reversed(features):\n if f.type == 'fault':\n feature_builder.add_fault(f)\n # if f.type == 'unconformity':\n # break",
"def _force_off(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'off')",
"def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)"
] | [
"0.5727856",
"0.55507016",
"0.5469473",
"0.5432097",
"0.54003435",
"0.53948396",
"0.52492994",
"0.51916826",
"0.51761824",
"0.51750493",
"0.51594543",
"0.5149204",
"0.5145031",
"0.51321125",
"0.51234275",
"0.5111981",
"0.5108552",
"0.50997925",
"0.50989044",
"0.50777584",
"0.50654113",
"0.50512046",
"0.49956825",
"0.49527553",
"0.4947444",
"0.49226227",
"0.49218795",
"0.48961076",
"0.48929265",
"0.4891433"
] | 0.6993913 | 0 |
Verify pool upgrade status. | def verify_pool_upgrade_status(self, pool_id, expected_status):
prop_value = self.get_dmg_command().pool_get_prop(
pool_id, "upgrade_status")['response'][0]['value']
if prop_value != expected_status:
self.fail("##prop_value != expected_status {}".format(expected_status)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify_package_status(self):\n pass",
"def verify_package_status(self):\n pass",
"def verify_pool(self, pool):\n svc = self.pool_path % pool\n self.rest_get(svc, restclient.Status.OK)",
"def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)",
"def pool_upgrade_with_fault(self, hosts, pool_id):\n # Verify pool status before upgrade\n expected_status = \"not started\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Enable fault-injection\n self.enable_disable_fault_injection(hosts, enable=True)\n\n # Pool upgrade\n result = run_pcmd(hosts, \"dmg pool upgrade {}\".format(pool_id))\n self.check_result(result)\n # Verify pool status during upgrade\n expected_status = \"in progress\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n # Verify pool status during upgrade\n expected_status = \"failed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Disable fault-injection\n self.enable_disable_fault_injection(hosts, enable=False)\n # Verify pool upgrade resume after removal of fault-injection\n expected_status = \"completed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)",
"def pre_upgrade_checks(self):\n\n #HostOverview\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST OVERVIEW\")\n Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Ambari version\\t\\t:{0}\".format(self.ambari_version))\n\n #Check OS\n os = platform.dist()\n if os[1] != None:\n Logger.info(\"Operating System\\t\\t:{0} {1} - {2}\".format(os[0],os[1],os[2]))\n else:\n Logger.error(\"Unable to fetch OS details.\")\n self.terminate()\n return\n\n self.check_java_version()\n self.check_exactly_one_current_version()\n\n\n #Check if rack awareness is enabled ?\n rack_awareness = \"SELECT DISTINCT rack_info FROM hosts WHERE rack_info!='/default-rack';\"\n self.cursor.execute(rack_awareness)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.info(\"Rack Awareness ?\\t\\tNo\\n\")\n else:\n Logger.info(\"Rack Awareness ?\\t\\tYes\\n\")\n\n #Security Overview\n self.check_security()\n\n #Check High Availability configuration\n self.check_high_availability()\n\n #Check Metastores\n self.check_metastore()",
"def check_status(self):",
"def updatecheck(self):\n self.comp('packmanager').updatecheck_allpacks()",
"def check_update():\n try:\n raw_version = urllib.urlopen(VERSIONFILE)\n except IOError as e:\n print UPDATE_FAIL + \"can't fetch version file: \" + str(e)\n else:\n if raw_version.getcode() == 200:\n remote_version = raw_version.read().rstrip()\n if remote_version != VERSION:\n print(UPDATE_WARN + \"version \" + remote_version + \" is available, you have version \"\n + VERSION + \"\\n\\t\" + \"to update run: \" + UPDATECOMMAND)\n else:\n print UPDATE_FAIL + \"can't fetch version file\"",
"def test_upgrade_with_auto_upgrade_latest_engine_enabled():",
"def test_wait_for_upgrade(self):\n self.run_test_suites(self.wait_for_upgrade_test_suite_list)",
"def check(self):\n current = self._get_current()\n # There is no version, so don't attempt to upgrade\n if current[-1]:\n return False\n\n highest = self._get_highest_version()\n return highest > current",
"def check_availability(self):\n pass",
"def test_03_verify_upgraded_ipv6_network(self):\n\n self.createIpv4NetworkOffering(False)\n self.createIpv6NetworkOfferingForUpdate(False)\n self.createTinyServiceOffering()\n self.prepareRoutingTestResourcesInBackground()\n self.deployNetwork()\n self.deployNetworkVm()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()",
"def check_updates(self):\n try:\n if not common.latest_version(version):\n self.update_notify()\n except:\n self.neterror()",
"def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0",
"def test_version_check_outdated(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_outdated\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_outdated\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertIn(\"Already at latest version\", output)\n self.assertNotIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)",
"def _check_upgrade(dbapi, host_obj=None):\n is_upgrading, upgrade = is_upgrade_in_progress(dbapi)\n if is_upgrading:\n if host_obj:\n if host_obj.created_at > upgrade.created_at:\n LOG.info(\"New host %s created after upgrade, allow partition\" %\n host_obj.hostname)\n return\n\n raise wsme.exc.ClientSideError(\n _(\"ERROR: Disk partition operations are not allowed during a \"\n \"software upgrade. Try again after the upgrade is completed.\"))",
"def is_upgrade_in_progress(dbapi):\n try:\n upgrade = dbapi.software_upgrade_get_one()\n LOG.debug(\"Platform Upgrade in Progress: state=%s\" % upgrade.state)\n return True, upgrade\n except exception.NotFound:\n LOG.debug(\"No Platform Upgrades in Progress\")\n return False, None",
"def test_upgrade_plan_all_fine(setup, skuba):\n\n setup_kubernetes_version(skuba)\n out = skuba.cluster_upgrade_plan()\n\n assert out.find(\n \"Congratulations! You are already at the latest version available\"\n ) != -1",
"def test_up_to_date(self):\n last_public_release = get_pypi_version()\n self.assertFalse(update_available(last_public_release))",
"def _checkUpdateNeeded(self):\n try:\n currentVersionLine = str(subprocess.run(['pacman', '-Q', '-i', self._name],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True).stdout)\n currentVersion = re.sub(r'.*Version\\s*: ([\\d|\\.]*)-.*', r'\\1', currentVersionLine).split('.')\n newVersion = self._version.split('.')\n for i in range(0, min(len(currentVersion), len(newVersion))):\n if currentVersion[i].isdigit():\n # TODO: test if new version is only digits too, two of them should be the same anyway\n if int(newVersion[i]) > int(currentVersion[i]):\n return True\n if int(newVersion[i]) < int(currentVersion[i]):\n return False\n return len(newVersion) > len(currentVersion)\n except subprocess.CalledProcessError:\n # Package not found: to be installed then\n return True",
"def test_update_availability(self):\n\n # Create an analytics integration so we can make sure\n # events are tracked.\n integration, ignore = create(\n self._db, ExternalIntegration,\n goal=ExternalIntegration.ANALYTICS_GOAL,\n protocol=\"core.local_analytics_provider\",\n )\n\n # Create a LicensePool that needs updating.\n edition, pool = self._edition(\n identifier_type=Identifier.THREEM_ID,\n data_source_name=DataSource.THREEM,\n with_license_pool=True,\n collection=self.collection\n )\n\n # We have never checked the circulation information for this\n # LicensePool. Put some random junk in the pool to verify\n # that it gets changed.\n pool.licenses_owned = 10\n pool.licenses_available = 5\n pool.patrons_in_hold_queue = 3\n eq_(None, pool.last_checked)\n\n # Prepare availability information.\n data = self.sample_data(\"item_circulation_single.xml\")\n # Change the ID in the test data so it looks like it's talking\n # about the LicensePool we just created.\n data = data.replace(\"d5rf89\", pool.identifier.identifier)\n\n # Update availability using that data.\n self.api.queue_response(200, content=data)\n self.api.update_availability(pool)\n\n # The availability information has been updated, as has the\n # date the availability information was last checked.\n eq_(1, pool.licenses_owned)\n eq_(1, pool.licenses_available)\n eq_(0, pool.patrons_in_hold_queue)\n\n circulation_events = self._db.query(CirculationEvent).join(LicensePool).filter(LicensePool.id==pool.id)\n eq_(3, circulation_events.count())\n types = [e.type for e in circulation_events]\n eq_(sorted([CirculationEvent.DISTRIBUTOR_LICENSE_REMOVE,\n CirculationEvent.DISTRIBUTOR_CHECKOUT,\n CirculationEvent.DISTRIBUTOR_HOLD_RELEASE]),\n sorted(types))\n\n old_last_checked = pool.last_checked\n assert old_last_checked is not None\n\n # Now let's try update_availability again, with a file that\n # makes it look like the book has been removed from the\n # collection.\n data = self.sample_data(\"empty_item_circulation.xml\")\n self.api.queue_response(200, content=data)\n\n self.api.update_availability(pool)\n\n eq_(0, pool.licenses_owned)\n eq_(0, pool.licenses_available)\n eq_(0, pool.patrons_in_hold_queue)\n\n assert pool.last_checked is not old_last_checked\n\n circulation_events = self._db.query(CirculationEvent).join(LicensePool).filter(LicensePool.id==pool.id)\n eq_(5, circulation_events.count())",
"def test_new_upgrade_pending(\n mocker, state, slack, ouw_oc_map, ouw_ocm_map, upgrade_config, dt\n):\n dt.utcnow.return_value = upgrade_at - timedelta(hours=1)\n gso = mocker.patch(\n \"reconcile.openshift_upgrade_watcher._get_start_osd\", autospec=True\n )\n gso.return_value = upgrade_at.strftime(\"%Y-%m-%dT%H:%M:%SZ\"), upgrade_version\n ouw.notify_upgrades_start(\n ocm_map=ouw_ocm_map,\n oc_map=ouw_oc_map,\n clusters=[load_cluster(\"cluster1.yml\")],\n state=state,\n slack=slack,\n )\n assert slack.chat_post_message.call_count == 0\n assert state.add.call_count == 0",
"def check_state(self):\n pass",
"def test_update_offline_status(self):\n pass",
"def validate_tls_min_version(self, node=None, version=\"1.2\", expect=\"fail\"):\n if node is None:\n node = self.cluster.master\n cmd = self.curl_path + \" -v --tlsv\" + version + \" --tls-max \" + version + \\\n \" -u \" + node.rest_username + \":\" + node.rest_password + \\\n \" https://\" + node.ip + \":18091/pools/ -k\"\n shell = RemoteMachineShellConnection(node)\n o, e = shell.execute_command(cmd)\n if expect == \"fail\":\n if len(o) != 0:\n shell.disconnect()\n self.fail(\"Command worked when it should have failed\")\n else:\n if len(o) == 0 or \"pools\" not in o[0]:\n shell.disconnect()\n self.fail(\"Command failed when it should have worked\")\n shell.disconnect()",
"def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')",
"def checkVersion(self):\n try:\n respInfo = self._reqSession.get(self._host + \"/static/pythonSDKVersion.txt\")\n if respInfo.status_code != 200 or len(respInfo.text) > 20:\n return\n latestVersion = respInfo.text.strip()\n import eventregistry._version as _version\n currentVersion = _version.__version__\n for (latest, current) in zip(latestVersion.split(\".\"), currentVersion.split(\".\")):\n if int(latest) > int(current):\n logger.info(\"==============\\nYour version of the module is outdated, please update to the latest version\")\n logger.info(\"Your version is %s while the latest is %s\", currentVersion, latestVersion)\n logger.info(\"Update by calling: pip install --upgrade eventregistry\\n==============\")\n return\n # in case the server mistakenly has a lower version that the user has, don't report an error\n elif int(latest) < int(current):\n return\n except:\n pass",
"def test_new_version_no_op(mocker, state, slack, clusters):\n state.exists.return_value = True\n state.get.return_value = upgrade_version # same version, already notified\n ouw.notify_cluster_new_version(clusters, state=state, slack=slack)\n assert slack.chat_post_message.call_count == 0\n assert state.add.call_count == 0"
] | [
"0.6395156",
"0.6395156",
"0.6306147",
"0.6207196",
"0.62052345",
"0.5972069",
"0.5907427",
"0.5898982",
"0.5890571",
"0.5863571",
"0.58613586",
"0.58581054",
"0.583659",
"0.5818412",
"0.5748545",
"0.5742679",
"0.5731763",
"0.5725801",
"0.56647915",
"0.5657188",
"0.56565666",
"0.56484735",
"0.5640938",
"0.5637044",
"0.5601428",
"0.558917",
"0.55842215",
"0.5574557",
"0.55555284",
"0.5554768"
] | 0.7941663 | 0 |
Execute dmg pool upgrade with fault injection. | def pool_upgrade_with_fault(self, hosts, pool_id):
# Verify pool status before upgrade
expected_status = "not started"
self.verify_pool_upgrade_status(pool_id, expected_status)
# Enable fault-injection
self.enable_disable_fault_injection(hosts, enable=True)
# Pool upgrade
result = run_pcmd(hosts, "dmg pool upgrade {}".format(pool_id))
self.check_result(result)
# Verify pool status during upgrade
expected_status = "in progress"
self.verify_pool_upgrade_status(pool_id, expected_status)
# Verify pool status during upgrade
expected_status = "failed"
self.verify_pool_upgrade_status(pool_id, expected_status)
# Disable fault-injection
self.enable_disable_fault_injection(hosts, enable=False)
# Verify pool upgrade resume after removal of fault-injection
expected_status = "completed"
self.verify_pool_upgrade_status(pool_id, expected_status) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upgrade_and_downgrade(self, fault_on_pool_upgrade=False):\n # (1)Setup\n self.log.info(\"(1)==Setup and show rpm, dmg and daos versions on all hosts.\")\n hosts_client = self.hostlist_clients\n hosts_server = self.hostlist_servers\n all_hosts = include_local_host(hosts_server)\n self.upgrade_repo = self.params.get(\"upgrade_repo\", '/run/interop/*')\n self.downgrade_repo = self.params.get(\"downgrade_repo\", '/run/interop/*')\n num_attributes = self.params.get(\"num_attributes\", '/run/attrtests/*')\n ior_api = self.params.get(\"api\", '/run/ior/*')\n mount_dir = self.params.get(\"mount_dir\", '/run/dfuse/*')\n self.show_daos_version(all_hosts, hosts_client)\n\n # (2)Create pool container and pool attributes\n self.log.info(\"(2)==Create pool attributes.\")\n self.add_pool(connect=False)\n pool_id = self.pool.identifier\n self.add_container(self.pool)\n self.container.open()\n self.daos_cmd = self.get_daos_command()\n pool_attr_dict = self.create_data_set(num_attributes)\n self.pool.pool.set_attr(data=pool_attr_dict)\n self.verify_pool_attrs(pool_attr_dict)\n self.container.close()\n self.pool.disconnect()\n\n # (3)Setup and run IOR\n self.log.info(\"(3)==Setup and run IOR.\")\n result = run_pcmd(hosts_client, \"mkdir -p {}\".format(mount_dir))\n ior_timeout = self.params.get(\"ior_timeout\", '/run/ior/*')\n iorflags_write = self.params.get(\"write_flg\", '/run/ior/iorflags/*')\n iorflags_read = self.params.get(\"read_flg\", '/run/ior/iorflags/*')\n testfile = os.path.join(mount_dir, \"testfile\")\n testfile_sav = os.path.join(mount_dir, \"testfile_sav\")\n testfile_sav2 = os.path.join(mount_dir, \"testfile_sav2\")\n symlink_testfile = os.path.join(mount_dir, \"symlink_testfile\")\n # (3.a)ior dfs\n if ior_api in (\"DFS\", \"POSIX\"):\n self.log.info(\"(3.a)==Run non-HDF5 IOR write and read.\")\n self.ior_cmd.flags.update(iorflags_write)\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=True, create_cont=True, stop_dfuse=False)\n self.ior_cmd.flags.update(iorflags_read)\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n\n # (3.b)ior hdf5\n elif ior_api == \"HDF5\":\n self.log.info(\"(3.b)==Run IOR HDF5 write and read.\")\n hdf5_plugin_path = self.params.get(\"plugin_path\", '/run/hdf5_vol/')\n self.ior_cmd.flags.update(iorflags_write)\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=True, create_cont=True, stop_dfuse=False)\n self.ior_cmd.flags.update(iorflags_read)\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n else:\n self.fail(\"##(3)Unsupported IOR api {}\".format(ior_api))\n\n # (3.c)ior posix test file with symlink\n if ior_api == \"POSIX\":\n self.log.info(\"(3.c)==Symlink mounted testfile.\")\n result = run_pcmd(hosts_client, \"cd {}\".format(mount_dir))\n result = run_pcmd(hosts_client, \"ls -l {}\".format(testfile))\n result = run_pcmd(hosts_client, \"cp {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"cp {0} {1}\".format(testfile, testfile_sav2))\n self.check_result(result)\n result = run_pcmd(\n hosts_client, \"ln -vs {0} {1}\".format(testfile_sav2, symlink_testfile))\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"ls -l {}\".format(symlink_testfile))\n 
self.check_result(result)\n self.container.close()\n self.pool.disconnect()\n result = run_pcmd(hosts_client, \"fusermount3 -u {}\".format(mount_dir))\n self.check_result(result)\n\n # Verify pool attributes before upgrade\n self.log.info(\"(3.2)==verify pool attributes before upgrade.\")\n self.verify_pool_attrs(pool_attr_dict)\n\n # (4)dmg system stop\n self.log.info(\"(4)==Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n\n # (5)Upgrade\n self.log.info(\"(5)==Upgrade RPMs to 2.2.\")\n self.upgrade(hosts_server, hosts_client)\n\n self.log.info(\"==sleeping 30 more seconds\")\n time.sleep(30)\n # (6)Restart servers\n self.log.info(\"(6)==Restart servers.\")\n self.restart_servers()\n\n # (7)Verification after upgrade\n # Restart agent\n self.log.info(\"(7.1)====Restarting rel_2.2 agent after upgrade.\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n self.get_dmg_command().pool_list(verbose=True)\n self.get_dmg_command().pool_query(pool=pool_id)\n self.daos_cmd.pool_query(pool=pool_id)\n\n # Verify pool attributes\n self.log.info(\"(7.2)====Verifying pool attributes after upgrade.\")\n self.verify_pool_attrs(pool_attr_dict)\n self.daos_ver_after_upgraded(hosts_client)\n\n # Verify IOR data and symlink\n self.log.info(\"(7.3)====Verifying container data IOR read.\")\n if ior_api == \"DFS\":\n self.log.info(\"(7.a)==Run IOR DFS read verification.\")\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n elif ior_api == \"HDF5\":\n self.log.info(\"(7.b)==Run IOR HDF5 read verification.\")\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n else:\n self.log.info(\"(7.c)==Run Symlink check after upgraded.\")\n result = run_pcmd(\n hosts_client,\n \"dfuse --mountpoint {0} --pool {1} --container {2}\".format(\n mount_dir, pool_id, self.container))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(symlink_testfile, testfile_sav2))\n self.check_result(result)\n\n # (8)Dmg pool get-prop\n self.log.info(\"(8)==Dmg pool get-prop after RPMs upgraded before Pool upgraded\")\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool_id))\n self.check_result(result)\n\n # (9)Pool property verification after upgraded\n self.log.info(\"(9)==Dmg pool upgrade and get-prop after RPMs upgraded\")\n\n if fault_on_pool_upgrade and self.has_fault_injection(hosts_client):\n self.log.info(\"(9.1a)==Pool upgrade with fault-injection.\")\n self.pool_upgrade_with_fault(hosts_client, pool_id)\n else:\n self.log.info(\"(9.1b)==Pool upgrade.\")\n result = run_pcmd(hosts_client, \"dmg pool upgrade {}\".format(pool_id))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool_id))\n self.check_result(result)\n self.log.info(\"(9.2)==verify pool attributes after pool-upgraded.\")\n self.verify_pool_attrs(pool_attr_dict)\n self.pool.destroy()\n\n # (10)Create new pool\n self.log.info(\"(10)==Create new pool after rpms Upgraded\")\n self.add_pool(connect=False)\n pool2_id = self.pool.identifier\n self.get_dmg_command().pool_list(verbose=True)\n 
self.get_dmg_command().pool_query(pool=pool2_id)\n self.daos_cmd.pool_query(pool=pool2_id)\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool2_id))\n self.check_result(result)\n\n # (11)Downgrade and cleanup\n self.log.info(\"(11)==Downgrade and cleanup.\")\n if ior_api == \"POSIX\":\n result = run_pcmd(hosts_client, \"fusermount3 -u {}\".format(mount_dir))\n self.check_result(result)\n self.container.close()\n self.pool.disconnect()\n self.pool.destroy()\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n self.log.info(\"(11.1)==Downgrade RPMs to 2.0.3.\")\n self.downgrade(hosts_server, hosts_client)\n self.log.info(\"==sleeping 30 more seconds\")\n time.sleep(30)\n\n # (12)Cleanup restart server and agent\n self.log.info(\"(12)==Restart 2.0 servers and agent.\")\n self.restart_servers()\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n if fault_on_pool_upgrade and not self.has_fault_injection(hosts_client):\n self.fail(\"##(12)Upgraded-rpms did not have fault-injection feature.\")\n self.log.info(\"==(12)Test passed\")",
"def upgrade(self,summary_handle,role,rpm_keyword,image_url,dir_installer,exit_flag,mode,summary_var_dict={}):\n if image_url.endswith(\"/\"):\n imageurl_final = image_url\n else:\n imageurl_final = image_url + \"/\"\n\n length = len(imageurl_final.split('/')) -4\n cmd = \"yum clean all\"\n self.sendCmd(cmd,300)\n dir_installer_role = dir_installer + \"/\" + role\n self.changeDirectory(dir_installer_role)\n tmp_var = \"wget%s%s\" %(self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"false\":\n self.download_rpm(summary_handle,length,imageurl_final,role)\n else:\n self.download_rpm(summary_handle,length,imageurl_final,role)\n\n\n num_files = \"ls -lrt *\\.rpm | grep %s-[0-9] | awk \\'{print $NF}\\' | xargs ls -t | tail -n1\" %rpm_keyword\n output = self.sendCmd(num_files).split(\"\\n\")\n for each in output:\n if each.rstrip().endswith(\"rpm\"):\n\n ##### Step added for uninstalling the rpm before installing \n tmpcmd = \"yum -y remove \" + each.rstrip().rstrip(\".rpm\")\n\n\n tmpcmd1 = \"yum -y install \" + each.rstrip()\n tmp_var = \"%s%s%s\" %(tmpcmd1,self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"true\":\n continue\n\n\n output = self.sendCmd(tmpcmd,600)\n output = self.sendCmd(tmpcmd1,600)\n time.sleep(30)\n output1 = self.sendCmd(\"echo $?\").split(\"\\n\")\n output2 = [item.replace(\"\\r\", \"\") for item in output1]\n if \"0\" not in output2 :\n summary_handle.write(\"%s,%s,%s,fail \\n\" %(tmpcmd1,self,role))\n if exit_flag == \"yes\":\n report.fail(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n logger.info(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n summary_handle.write(\"%s,%s,%s,pass \\n\" %(tmpcmd1,self,role))\n logger.info(\"Successful installation of %s on node %s having role %s\" %(each.strip(),self,role))",
"def daos_ver_after_upgraded(self, host):\n cmds = [\n \"daos version\",\n \"dmg version\",\n \"daos pool query {}\".format(self.pool.identifier)]\n for cmd in cmds:\n self.log.info(\"==cmd= %s\", cmd)\n result = pcmd(host, cmd, False)\n if 0 not in result or len(result) > 1:\n failed = []\n for item, value in list(result.items()):\n if item != 0:\n failed.extend(value)\n raise CommandFailure(\"##Error occurred running '{}' on {}\".format(\n cmd, host))\n self.log.info(\"==>%s result= %s\", cmd, result)",
"def package_upgrade():\n\n if (do_action_package_upgrade('nova-common',\n do_openstack_upgrade,\n CONFIGS)):\n # we should restart the container scoped (subordinate) plugins after a\n # managed openstack upgrade see: BUG#1835557\n for rid in relation_ids('neutron-plugin'):\n neutron_plugin_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-ceilometer'):\n nova_ceilometer_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-vgpu'):\n nova_vgpu_joined(rid, remote_restart=True)\n # NOTE(ajkavanagh) - if unit is paused (usually true for managed\n # upgrade) then the config_changed() function is a no-op\n config_changed()",
"def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()",
"def diff_versions_agent_server(self):\n # (1)Setup\n self.log.info(\"==(1)Setup, create pool and container.\")\n hosts_client = self.hostlist_clients\n hosts_server = self.hostlist_servers\n all_hosts = include_local_host(hosts_server | hosts_client)\n self.upgrade_repo = self.params.get(\"upgrade_repo\", '/run/interop/*')\n self.downgrade_repo = self.params.get(\"downgrade_repo\", '/run/interop/*')\n self.add_pool(connect=False)\n pool_id = self.pool.identifier\n self.add_container(self.pool)\n self.container.open()\n cmd = \"dmg system query\"\n positive_test = True\n negative_test = False\n agent_server_ver = \"2.0 agent to 2.0 server\"\n self.verify_daos_libdaos(\"1.1\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (2)dmg system stop\n self.log.info(\"==(2)Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n\n # (3)Upgrade 1 server-host to new\n self.log.info(\"==(3)Upgrade 1 server to 2.2.\")\n server = hosts_server[0:1]\n self.upgrade(server, [])\n self.log.info(\"==(3.1)server %s Upgrade to 2.2 completed.\", server)\n\n # (4)Negative test - dmg pool query on mix-version servers\n self.log.info(\"==(4)Negative test - dmg pool query on mix-version servers.\")\n agent_server_ver = \"2.0 agent, mix-version server-hosts\"\n cmd = \"dmg pool list\"\n exp_err = \"unable to contact the DAOS Management Service\"\n self.verify_daos_libdaos(\n \"4.1\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n\n # (5)Upgrade rest server-hosts to 2.2\n server = hosts_server[1:len(hosts_server)]\n self.log.info(\"==(5) Upgrade rest server %s to 2.2.\", server)\n self.upgrade(server, [])\n self.log.info(\"==(5.1) server %s Upgrade to 2.2 completed.\", server)\n\n # (6)Restart 2.0 agent\n self.log.info(\"==(6)Restart 2.0 agent\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (7)Verify 2.0 agent connect to 2.2 server\n self.log.info(\"==(7)Verify 2.0 agent connect to 2.2 server\")\n agent_server_ver = \"2.0 agent to 2.2 server\"\n cmd = \"daos pool query {0}\".format(pool_id)\n self.verify_daos_libdaos(\"7.1\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"dmg pool query {0}\".format(pool_id)\n exp_err = \"admin:0.0.0 are not compatible\"\n self.verify_daos_libdaos(\n \"7.2\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"7.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create {0} --type POSIX --properties 'rf:2'\".format(pool_id)\n self.verify_daos_libdaos(\"7.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos pool autotest --pool {0}\".format(pool_id)\n self.verify_daos_libdaos(\"7.5\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (8)Upgrade agent to 2.2\n self.log.info(\"==(8)Upgrade agent to 2.2, now 2.2 servers 2.2 agent.\")\n self.upgrade([], hosts_client)\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (9)Pool and containers create on 2.2 agent and server\n self.log.info(\"==(9)Create new pools and containers on 2.2 agent to 2.2 server\")\n agent_server_ver = \"2.2 agent to 2.2 server\"\n cmd = \"dmg pool create --size 5G New_pool1\"\n self.verify_daos_libdaos(\"9.1\", hosts_client, cmd, positive_test, 
agent_server_ver)\n cmd = \"dmg pool list\"\n self.verify_daos_libdaos(\"9.2\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 C21 --type POSIX --properties 'rf:2'\"\n self.verify_daos_libdaos(\"9.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 C22 --type POSIX --properties 'rf:2'\"\n self.verify_daos_libdaos(\"9.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos container list New_pool1\"\n self.verify_daos_libdaos(\"9.5\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"9.6\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos pool autotest --pool New_pool1\"\n self.verify_daos_libdaos(\"9.7\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (10)Downgrade server to 2.0\n self.log.info(\"==(10)Downgrade server to 2.0, now 2.2 agent to 2.0 server.\")\n self.log.info(\"==(10.1)Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n self.log.info(\"==(10.2)Downgrade server to 2.0\")\n self.downgrade(hosts_server, [])\n self.log.info(\"==(10.3)Restart 2.0 agent\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (11)Verify 2.2 agent to 2.0 server\n agent_server_ver = \"2.2 agent to 2.0 server\"\n cmd = \"daos pool query {0}\".format(pool_id)\n self.verify_daos_libdaos(\"11.1\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"dmg pool query {0}\".format(pool_id)\n exp_err = \"does not match\"\n self.verify_daos_libdaos(\n \"11.2\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"11.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create {0} 'C_oldP' --type POSIX --properties 'rf:2'\".format(\n pool_id)\n self.verify_daos_libdaos(\"11.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 'C_newP' --type POSIX --properties 'rf:2'\"\n exp_err = \"DER_NO_SERVICE(-2039)\"\n self.verify_daos_libdaos(\n \"11.5\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n exp_err = \"common ERR\"\n cmd = \"daos pool autotest --pool {0}\".format(pool_id)\n self.verify_daos_libdaos(\n \"11.6\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n\n # (12)Downgrade agent to 2.0\n self.log.info(\"==(12)Agent %s Downgrade started.\", hosts_client)\n self.downgrade([], hosts_client)\n self.log.info(\"==Test passed\")",
"def run_upgrade(args):\n upgrader = Upgrade(\n args.src,\n args.dst,\n PuppetUpgrader(args.src),\n disable_rollback=args.disable_rollback)\n\n upgrader.run()",
"def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0",
"def upgrade_cmd(jail, release):\n lgr = ioc_logger.Logger('ioc_cli_upgrade')\n lgr = lgr.getLogger()\n\n jails, paths = IOCList(\"uuid\").list_datasets()\n _jail = {tag: uuid for (tag, uuid) in jails.items() if\n uuid.startswith(jail) or tag == jail}\n\n if len(_jail) == 1:\n tag, uuid = next(iter(_jail.items()))\n path = paths[tag]\n root_path = \"{}/root\".format(path)\n elif len(_jail) > 1:\n lgr.error(\"Multiple jails found for\"\n \" {}:\".format(jail))\n for t, u in sorted(_jail.items()):\n lgr.critical(\" {} ({})\".format(u, t))\n exit(1)\n else:\n lgr.critical(\"{} not found!\".format(jail))\n exit(1)\n\n pool = IOCJson().json_get_value(\"pool\")\n iocroot = IOCJson(pool).json_get_value(\"iocroot\")\n freebsd_version = checkoutput([\"freebsd-version\"])\n status, jid = IOCList.list_get_jid(uuid)\n conf = IOCJson(path).json_load()\n host_release = os.uname()[2]\n jail_release = conf[\"release\"]\n started = False\n\n if conf[\"release\"] == \"EMPTY\":\n lgr.critical(\"Upgrading is not supported for empty jails.\")\n exit(1)\n\n if conf[\"type\"] == \"jail\":\n if not status:\n IOCStart(uuid, tag, path, conf, silent=True)\n status, jid = IOCList.list_get_jid(uuid)\n started = True\n elif conf[\"type\"] == \"basejail\":\n lgr.critical(\"Please run \\\"iocage migrate\\\" before trying\"\n \" to upgrade {} ({})\".format(uuid, tag))\n exit(1)\n elif conf[\"type\"] == \"template\":\n lgr.critical(\"Please convert back to a jail before trying\"\n \" to upgrade {} ({})\".format(uuid, tag))\n exit(1)\n else:\n lgr.critical(\"{} is not a supported jail type.\".format(conf[\"type\"]))\n exit(1)\n\n _freebsd_version = \"{}/releases/{}/root/bin/freebsd-version\".format(\n iocroot, release)\n\n if \"HBSD\" in freebsd_version:\n Popen([\"hbsd-upgrade\", \"-j\", jid]).communicate()\n else:\n if os.path.isfile(\"{}/etc/freebsd-update.conf\".format(root_path)):\n # 10.3-RELEASE and under lack this flag\n if float(host_release.partition(\"-\")[0][:5]) <= 10.3:\n lgr.critical(\"Host: {} is too old, please upgrade to \"\n \"10.3-RELEASE or above\".format(host_release))\n exit(1)\n\n os.environ[\"PAGER\"] = \"/bin/cat\"\n fetch = Popen([\"freebsd-update\", \"-b\", root_path, \"-d\",\n \"{}/var/db/freebsd-update/\".format(root_path), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(root_path),\n \"--currently-running {}\".format(jail_release), \"-r\",\n release, \"upgrade\"], stdin=PIPE)\n fetch.communicate(b\"y\")\n\n while not __upgrade_install__(root_path, release):\n pass\n\n if release[:4].endswith(\"-\"):\n # 9.3-RELEASE and under don't actually have this binary.\n new_release = release\n else:\n with open(_freebsd_version, \"r\") as r:\n for line in r:\n if line.startswith(\"USERLAND_VERSION\"):\n new_release = line.rstrip().partition(\"=\")[\n 2].strip(\n '\"')\n\n IOCJson(path, silent=True).json_set_value(\"release={}\".format(\n new_release))\n\n if started:\n IOCStop(uuid, tag, path, conf, silent=True)\n\n lgr.info(\"\\n{} ({}) successfully upgraded from {} to {}!\".format(\n uuid, tag, jail_release, new_release))",
"def run(self):\n\n run_command(['apt-get', 'update'])\n run_command(['apt-get', 'install', '-y', 'unattended-upgrades'])\n run_command(['apt-get', 'upgrade', '-y'])",
"def post_installation(self, exc_value):\n pass",
"def upgrade(self):",
"def upgrade(self):",
"def command_upgrade(self):\n args = [\n self.cfg.bin_dir / \"arangodb\",\n \"upgrade\",\n \"--starter.endpoint\",\n self.get_http_protocol() + \"://127.0.0.1:\" + str(self.get_my_port()),\n ]\n logging.info(\"StarterManager: Commanding upgrade:\")\n lh.log_cmd(\" \".join([str(arg) for arg in args]))\n self.upgradeprocess = psutil.Popen(\n args,\n # stdout=subprocess.PIPE,\n # stdin=subprocess.PIPE,\n # stderr=subprocess.PIPE,\n universal_newlines=True,\n )\n print(\"Upgrade commander has PID:\" + str(self.upgradeprocess.pid))",
"def Run(self):\n\n # Execution on the master\n if self.cmd == 'MASTER':\n self.PerformMirroredUpgrade()\n \n # The rest of the options are executed on remote hosts\n elif self.cmd == 'CHKDOWN':\n self.CheckDown()\n elif self.cmd == 'SETCATVERSION':\n self.SetCatVersion((str(self.option)==str(True)))\n else:\n raise Exception('Unknown cmd: ' + str(self.cmd))\n \n if self.pool:\n t = self.pool\n self.pool = None\n del t",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"pldmfwuppkgname\", help=\"Name of the PLDM FW update package\"\n )\n parser.add_argument(\"metadatafile\", help=\"Path of metadata JSON file\")\n parser.add_argument(\n \"images\",\n nargs=\"+\",\n help=(\n \"One or more firmware image paths, in the same order as \"\n \" ComponentImageInformationArea entries\"\n ),\n )\n\n args = parser.parse_args()\n image_files = args.images\n with open(args.metadatafile) as file:\n try:\n metadata = json.load(file)\n except ValueError:\n sys.exit(\"ERROR: Invalid metadata JSON file\")\n\n # Validate the number of component images\n if len(image_files) != len(metadata[\"ComponentImageInformationArea\"]):\n sys.exit(\n \"ERROR: number of images passed != number of entries \"\n \" in ComponentImageInformationArea\"\n )\n\n try:\n with open(args.pldmfwuppkgname, \"w+b\") as pldm_fw_up_pkg:\n component_bitmap_bit_length = write_pkg_header_info(\n pldm_fw_up_pkg, metadata\n )\n write_fw_device_identification_area(\n pldm_fw_up_pkg, metadata, component_bitmap_bit_length\n )\n write_component_image_info_area(\n pldm_fw_up_pkg, metadata, image_files\n )\n update_pkg_header_size(pldm_fw_up_pkg)\n write_pkg_header_checksum(pldm_fw_up_pkg)\n append_component_images(pldm_fw_up_pkg, image_files)\n pldm_fw_up_pkg.close()\n except BaseException:\n pldm_fw_up_pkg.close()\n os.remove(args.pldmfwuppkgname)\n raise",
"def upgrade(self):\n self.config.basedeltadir = os.path.join(const.BASESDIR, time.strftime(\"base_%Y.%m.%d-%Hh%Mm%S\"))\n logger.debug(\"Upgrading the container to create a base in {}\".format(self.config.basedeltadir))\n basedelta = os.path.join(self.containerpath, self.config.basedeltadir)\n os.makedirs(basedelta)\n self.config.command = \"upgrade\"\n self.start()\n self.container.wait('STOPPED', const.UPGRADE_TIMEOUT)\n if self.running:\n raise ContainerError(\"The container didn't stop successfully\")\n self.config.command = \"\"\n if os.path.isfile(os.path.join(basedelta, '.upgrade')):\n raise ContainerError(\"The upgrade didn't finish successfully\")",
"def upgrade_script():\n if postgres.db_exists(env.db):\n with cd(path()):\n sudo('bin/upgrade_{odoo} -d {db} '.format(**env), user=env.account)",
"async def on_upgrade_complete(self, upgrade: UpgradeId):",
"def upgrade(ctx):\n tf_cmds = [\n [\"terraform\", \"init\", \"--upgrade\"],\n [\"terraform\", \"refresh\"],\n [\"terraform\", \"apply\", \"-auto-approve\"],\n ]\n\n if ctx.invoked_subcommand is None:\n if click.confirm('Do you want to run upgrade prechecks?'):\n ctx.invoke(precheck)\n else:\n print_warning_msg(f\"Skipping upgrade prechecks\")\n\n click.echo(\n \"Following commands will be run during upgrade\\n%s\" % (\n \"\\n\".join((map(\" \".join, tf_cmds)))\n ),\n )\n for cmd in tf_cmds:\n if click.confirm(\n 'Do you want to continue with %s?' %\n \" \".join(cmd),\n ):\n rc = execute_command(cmd)\n if rc != 0:\n print_error_msg(\"Upgrade Failed!!!\")\n return",
"def YumUninstall(vm):\n _Uninstall(vm)",
"def _provision_package(self):",
"def test_relaunch_deployment_run(self):\n pass",
"def viosupgrade_list(module, targets):\n global CHANGED\n ret = 0\n\n for target in targets:\n # build the command\n cmd = '/usr/sbin/viosupgrade'\n if 'altdisk_install' in module.param['action']:\n cmd += ' -t altdisk'\n elif 'bos_install' in module.param['action']:\n cmd += ' -t bosinst'\n\n # TODO: check if NIM object name is supported, otherwise get it from lsnim\n # NIM object name can be different from hostname\n # 3rd field of following cmd result 'lsnim -Z -a 'if1' <target>' (separated with ':')\n cmd += ' -n ' + target\n\n if target in module.param['mksysb_name']:\n cmd += ' -m ' + module.param['mksysb_name'][target]\n elif 'all' in module.param['mksysb_name']:\n cmd += ' -m ' + module.param['mksysb_name']['all']\n\n if target in module.param['spot_name']:\n cmd += ' -p ' + module.param['spot_name'][target]\n elif 'all' in module.param['spot_name']:\n cmd += ' -p ' + module.param['spot_name']['all']\n\n if target in module.param['rootvg_clone_disk']:\n cmd += ' -a ' + module.param['rootvg_clone_disk'][target]\n elif 'all' in module.param['rootvg_clone_disk']:\n cmd += ' -a ' + module.param['rootvg_clone_disk']['all']\n\n if target in module.param['rootvg_install_disk']:\n cmd += ' -r ' + module.param['rootvg_install_disk'][target]\n elif 'all' in module.param['rootvg_install_disk']:\n cmd += ' -r ' + module.param['rootvg_install_disk']['all']\n\n if target in module.param['skip_rootvg_cloning']:\n if distutils.util.strtobool(module.param['skip_rootvg_cloning'][target]):\n cmd += ' -s'\n elif 'all' in module.param['skip_rootvg_cloning']:\n if distutils.util.strtobool(module.param['skip_rootvg_cloning']['all']):\n cmd += ' -s'\n\n if target in module.param['backup_file']:\n cmd += ' -b ' + module.param['backup_file'][target]\n elif 'all' in module.param['backup_file']:\n cmd += ' -b ' + module.param['backup_file']['all']\n\n if target in module.param['cluster_exists']:\n if distutils.util.strtobool(module.param['cluster_exists'][target]):\n cmd += ' -c'\n elif 'all' in module.param['cluster_exists']:\n if distutils.util.strtobool(module.param['cluster_exists']['all']):\n cmd += ' -c'\n\n if target in module.param['validate_input_data']:\n if distutils.util.strtobool(module.param['validate_input_data'][target]):\n cmd += ' -v'\n elif 'all' in module.param['validate_input_data']:\n if distutils.util.strtobool(module.param['validate_input_data']['all']):\n cmd += ' -v'\n\n supported_res = ['res_resolv_conf', 'res_script', 'res_fb_script',\n 'res_file_res', 'res_image_data', 'res_log']\n for res in supported_res:\n if target in module.param[res]:\n cmd += ' -e {}:{}'.format(res, module.param[res][target])\n elif 'all' in module.param[res]:\n cmd += ' -e {}:{}'.format(res, module.param[res]['all'])\n\n # run the command\n (rc, stdout, stderr) = module.run_command(cmd)\n\n CHANGED=True # don't really know\n logging.info(\"[STDOUT] {}\".format(stdout))\n if rc == 0:\n logging.info(\"[STDERR] {}\".format(stderr))\n else:\n logging.error(\"command {} failed: {}\".format(stderr))\n ret += 1\n\n return ret",
"def upgrade(self):\n # replace '\\' with '/' (For NT system compatability)\n filesplit = upgrade_file.replace('\\\\', '/')\n filesplit = filesplit.split('/')\n shortfilename = filesplit[-1]\n\n #define the JSON data for the multipart\n upgradejsondata = {media: {\"image\": partition, \"image-file\": shortfilename, \"reboot-after-upgrade\": 0}}\n url = self.base_url + 'upgrade/hd'\n\n #define the headers that have your auth token\n headers = {'Authorization': \"A10 \" + self.token}\n try:\n print(self.device + ' Performing upgrade, this may take a few minutes depending on your connection, please wait...')\n response = requests.post(url, headers=headers, files={'file': (shortfilename, open(upgrade_file, 'rb'), 'application/octet-stream'), 'json': (None, json.dumps(upgradejsondata), 'application/json'),}, verify=False)\n if response.status_code == 204:\n print(self.device + ' The device successfully upgraded')\n except Exception as e:\n print(' ERROR: Upgrade failed on ' + self.device + ' - ' + str(e))\n return 'FAIL'",
"def prepareUninstall():\n pass",
"def do_workload(self):\n module_manager = self._core.get_module_manager()\n module = module_manager.get_module_by_name(self._values[\"name\"])\n module_manager.uninstall_module(module)",
"def __upgrade_install__(path, release):\n install = Popen([\"freebsd-update\", \"-b\", path, \"-d\",\n \"{}/var/db/freebsd-update/\".format(path), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(path), \"-r\",\n release, \"install\"], stderr=PIPE)\n install.communicate()\n\n return install.returncode",
"def safe_upgrade():\n goviewbe.upgrade_db(current_app)",
"def YumInstall(vm):\n raise NotImplementedError"
] | [
"0.7017949",
"0.62637675",
"0.61918133",
"0.5806434",
"0.5690036",
"0.56347877",
"0.53812015",
"0.5361644",
"0.53614783",
"0.535473",
"0.5353267",
"0.53437454",
"0.53437454",
"0.53120893",
"0.5283165",
"0.5260421",
"0.5251918",
"0.5241946",
"0.51609534",
"0.51303685",
"0.5122055",
"0.51175326",
"0.5087872",
"0.50736713",
"0.5031372",
"0.5007906",
"0.5000342",
"0.49980304",
"0.4988963",
"0.49840537"
] | 0.73163605 | 0 |
Interoperability of different versions of DAOS agent and server. | def diff_versions_agent_server(self):
# (1)Setup
self.log.info("==(1)Setup, create pool and container.")
hosts_client = self.hostlist_clients
hosts_server = self.hostlist_servers
all_hosts = include_local_host(hosts_server | hosts_client)
self.upgrade_repo = self.params.get("upgrade_repo", '/run/interop/*')
self.downgrade_repo = self.params.get("downgrade_repo", '/run/interop/*')
self.add_pool(connect=False)
pool_id = self.pool.identifier
self.add_container(self.pool)
self.container.open()
cmd = "dmg system query"
positive_test = True
negative_test = False
agent_server_ver = "2.0 agent to 2.0 server"
self.verify_daos_libdaos("1.1", hosts_client, cmd, positive_test, agent_server_ver)
# (2)dmg system stop
self.log.info("==(2)Dmg system stop.")
self.get_dmg_command().system_stop()
errors = []
errors.extend(self._stop_managers(self.server_managers, "servers"))
errors.extend(self._stop_managers(self.agent_managers, "agents"))
# (3)Upgrade 1 server-host to new
self.log.info("==(3)Upgrade 1 server to 2.2.")
server = hosts_server[0:1]
self.upgrade(server, [])
self.log.info("==(3.1)server %s Upgrade to 2.2 completed.", server)
# (4)Negative test - dmg pool query on mix-version servers
self.log.info("==(4)Negative test - dmg pool query on mix-version servers.")
agent_server_ver = "2.0 agent, mix-version server-hosts"
cmd = "dmg pool list"
exp_err = "unable to contact the DAOS Management Service"
self.verify_daos_libdaos(
"4.1", hosts_client, cmd, negative_test, agent_server_ver, exp_err)
# (5)Upgrade rest server-hosts to 2.2
server = hosts_server[1:len(hosts_server)]
self.log.info("==(5) Upgrade rest server %s to 2.2.", server)
self.upgrade(server, [])
self.log.info("==(5.1) server %s Upgrade to 2.2 completed.", server)
# (6)Restart 2.0 agent
self.log.info("==(6)Restart 2.0 agent")
self._start_manager_list("agent", self.agent_managers)
self.show_daos_version(all_hosts, hosts_client)
# (7)Verify 2.0 agent connect to 2.2 server
self.log.info("==(7)Verify 2.0 agent connect to 2.2 server")
agent_server_ver = "2.0 agent to 2.2 server"
cmd = "daos pool query {0}".format(pool_id)
self.verify_daos_libdaos("7.1", hosts_client, cmd, positive_test, agent_server_ver)
cmd = "dmg pool query {0}".format(pool_id)
exp_err = "admin:0.0.0 are not compatible"
self.verify_daos_libdaos(
"7.2", hosts_client, cmd, negative_test, agent_server_ver, exp_err)
cmd = "sudo daos_agent dump-attachinfo"
self.verify_daos_libdaos("7.3", hosts_client, cmd, positive_test, agent_server_ver)
cmd = "daos cont create {0} --type POSIX --properties 'rf:2'".format(pool_id)
self.verify_daos_libdaos("7.4", hosts_client, cmd, positive_test, agent_server_ver)
cmd = "daos pool autotest --pool {0}".format(pool_id)
self.verify_daos_libdaos("7.5", hosts_client, cmd, positive_test, agent_server_ver)
# (8)Upgrade agent to 2.2
self.log.info("==(8)Upgrade agent to 2.2, now 2.2 servers 2.2 agent.")
self.upgrade([], hosts_client)
self._start_manager_list("agent", self.agent_managers)
self.show_daos_version(all_hosts, hosts_client)
# (9)Pool and containers create on 2.2 agent and server
self.log.info("==(9)Create new pools and containers on 2.2 agent to 2.2 server")
agent_server_ver = "2.2 agent to 2.2 server"
cmd = "dmg pool create --size 5G New_pool1"
self.verify_daos_libdaos("9.1", hosts_client, cmd, positive_test, agent_server_ver)
cmd = "dmg pool list"
self.verify_daos_libdaos("9.2", hosts_client, cmd, positive_test, agent_server_ver)
cmd = "daos cont create New_pool1 C21 --type POSIX --properties 'rf:2'"
self.verify_daos_libdaos("9.3", hosts_client, cmd, positive_test, agent_server_ver)
cmd = "daos cont create New_pool1 C22 --type POSIX --properties 'rf:2'"
self.verify_daos_libdaos("9.4", hosts_client, cmd, positive_test, agent_server_ver)
cmd = "daos container list New_pool1"
self.verify_daos_libdaos("9.5", hosts_client, cmd, positive_test, agent_server_ver)
cmd = "sudo daos_agent dump-attachinfo"
self.verify_daos_libdaos("9.6", hosts_client, cmd, positive_test, agent_server_ver)
cmd = "daos pool autotest --pool New_pool1"
self.verify_daos_libdaos("9.7", hosts_client, cmd, positive_test, agent_server_ver)
# (10)Downgrade server to 2.0
self.log.info("==(10)Downgrade server to 2.0, now 2.2 agent to 2.0 server.")
self.log.info("==(10.1)Dmg system stop.")
self.get_dmg_command().system_stop()
errors = []
errors.extend(self._stop_managers(self.server_managers, "servers"))
errors.extend(self._stop_managers(self.agent_managers, "agents"))
self.log.info("==(10.2)Downgrade server to 2.0")
self.downgrade(hosts_server, [])
self.log.info("==(10.3)Restart 2.0 agent")
self._start_manager_list("agent", self.agent_managers)
self.show_daos_version(all_hosts, hosts_client)
# (11)Verify 2.2 agent to 2.0 server
agent_server_ver = "2.2 agent to 2.0 server"
cmd = "daos pool query {0}".format(pool_id)
self.verify_daos_libdaos("11.1", hosts_client, cmd, positive_test, agent_server_ver)
cmd = "dmg pool query {0}".format(pool_id)
exp_err = "does not match"
self.verify_daos_libdaos(
"11.2", hosts_client, cmd, negative_test, agent_server_ver, exp_err)
cmd = "sudo daos_agent dump-attachinfo"
self.verify_daos_libdaos("11.3", hosts_client, cmd, positive_test, agent_server_ver)
cmd = "daos cont create {0} 'C_oldP' --type POSIX --properties 'rf:2'".format(
pool_id)
self.verify_daos_libdaos("11.4", hosts_client, cmd, positive_test, agent_server_ver)
cmd = "daos cont create New_pool1 'C_newP' --type POSIX --properties 'rf:2'"
exp_err = "DER_NO_SERVICE(-2039)"
self.verify_daos_libdaos(
"11.5", hosts_client, cmd, negative_test, agent_server_ver, exp_err)
exp_err = "common ERR"
cmd = "daos pool autotest --pool {0}".format(pool_id)
self.verify_daos_libdaos(
"11.6", hosts_client, cmd, negative_test, agent_server_ver, exp_err)
# (12)Downgrade agent to 2.0
self.log.info("==(12)Agent %s Downgrade started.", hosts_client)
self.downgrade([], hosts_client)
self.log.info("==Test passed") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def server_agent():",
"def test_upgrade_shared_dependencies(self):\n result = self.run_cli_command(\"-s\", \"upgrade\", cwd=self._get_cwd())\n assert result.exit_code == 0\n\n agent_config: AgentConfig = cast(\n AgentConfig,\n load_item_config(PackageType.AGENT.value, Path(self.current_agent_context)),\n )\n assert OefSearchMessage.protocol_id in agent_config.protocols\n assert SOEF_PUBLIC_ID in agent_config.connections\n assert OEF_PUBLIC_ID in agent_config.connections",
"def main():\n # Check available services in config\n with open(CONFIG_FILE, 'r') as f:\n config = yaml.load(f, Loader=yaml.SafeLoader)['darc_master']\n if config['mode'] == 'real-time':\n services = config['services_master_rt'] + config['services_worker_rt']\n elif config['mode'] == 'mixed':\n services = config['services_master_mix'] + config['services_worker_mix']\n else:\n services = config['services_master_off'] + config['services_worker_off']\n master_commands = config['master_commands']\n service_commands = config['service_commands']\n commands = list(set(master_commands + service_commands))\n\n # Parse arguments\n parser = ArgumentParser(formatter_class=RawTextHelpFormatter)\n parser.add_argument('--service', type=str, help=\"Which service to interact with, \"\n \" available services: {}, or all\".format(', '.join(services)))\n parser.add_argument('--timeout', type=int, default=10, help=\"Timeout for sending command \"\n \"(Default: %(default)ss)\")\n parser.add_argument('--host', type=str, default='localhost', help=\"Host to send command to \"\n \"(Default: %(default)s)\")\n parser.add_argument('--port', type=int, help=\"Port DARC listens to \"\n \"(Default: determine from DARC config file)\")\n parser.add_argument('--parset', type=str, help=\"Observation parset (takes precedence over --config)\")\n parser.add_argument('--config', type=str, help=\"Node observation config\")\n parser.add_argument('--version', action=\"store_true\", help=\"Print version info\")\n\n parser.add_argument('cmd', type=str, nargs='*', help=\"Command to execute. When using get_attr, add space \"\n \"followed by attribute. Available commands: \"\n \"{}\".format(', '.join(commands)))\n args = parser.parse_args()\n\n # Print version and exit\n if args.version:\n logger.info(f\"{darc.__version__}\")\n sys.exit(0)\n\n # Check arguments\n if not args.cmd:\n logger.error(\"Add command to execute e.g. \\\"darc --service amber_listener status\\\"\")\n sys.exit(1)\n cmd = args.cmd[0]\n\n try:\n attr = args.cmd[1]\n except IndexError:\n attr = None\n\n if cmd not in commands:\n logger.error(\"Unknown command: {}. Run darc -h to see available commands\".format(cmd))\n sys.exit(1)\n elif not args.service and cmd not in master_commands:\n logger.error(\"Argument --service is required for given command\")\n sys.exit(1)\n\n # add attribute to command if get_attr is called\n if attr is not None:\n if cmd == 'get_attr':\n cmd += f\" {attr}\"\n else:\n logger.error(\"Attribute can only be provided when using get_attr command\")\n sys.exit(1)\n\n # If command is edit, open config in an editor\n if cmd == 'edit':\n with open(CONFIG_FILE, 'r') as f:\n master_config = yaml.load(f, Loader=yaml.SafeLoader)['darc_master']\n default_editor = master_config['editor']\n editor = os.environ.get('EDITOR', default_editor)\n ret = subprocess.Popen([editor, CONFIG_FILE]).wait()\n if ret != 0:\n logger.error(\"Editor did not exit properly\")\n else:\n logger.info(\"Restart services to apply new settings, or run 'darc reload' to reload the master config.\\n\"\n \"WARNING: Restarting services aborts any running observation.\\n\"\n \"For services without queue server (i.e. 
all except LOFARTrigger and VOEventGenerator),\\n\"\n \"the config is automatically reloaded at the start of each observation.\")\n sys.exit(ret)\n\n # Get payload\n if args.parset:\n payload = args.parset\n elif args.config:\n payload = args.config\n else:\n payload = None\n\n if not send_command(args.timeout, args.service, cmd, host=args.host, port=args.port, payload=payload):\n sys.exit(1)",
"def main():\n utils.vip_main(AHUAgent, version=__version__)",
"def __init__(self, index=None):\n namespace = \"/run/server_config/servers/*\"\n if isinstance(index, int):\n namespace = \"/run/server_config/servers/{}/*\".format(index)\n super(DaosServerConfig.SingleServerConfig, self).__init__(namespace)\n\n # Use environment variables to get default parameters\n default_interface = os.environ.get(\"OFI_INTERFACE\", \"eth0\")\n default_port = os.environ.get(\"OFI_PORT\", 31416)\n\n # Parameters\n # targets: count of VOS targets\n # first_core: starting index for targets\n # nr_xs_helpers: offload helpers per server\n # fabric_iface: map to OFI_INTERFACE=eth0\n # fabric_iface_port: map to OFI_PORT=31416\n # log_mask: map to D_LOG_MASK env\n # log_file: map to D_LOG_FILE env\n # env_vars: influences DAOS IO Server behaviour\n # Add to enable scalable endpoint:\n # - CRT_CREDIT_EP_CTX=0\n # - CRT_CTX_SHARE_ADDR=1\n # - CRT_CTX_NUM=8\n # nvme options:\n # - IO_STAT_PERIOD=10\n self.targets = BasicParameter(None, 8)\n self.first_core = BasicParameter(None, 0)\n self.nr_xs_helpers = BasicParameter(None, 16)\n self.fabric_iface = BasicParameter(None, default_interface)\n self.fabric_iface_port = BasicParameter(None, default_port)\n self.pinned_numa_node = BasicParameter(None)\n self.log_mask = BasicParameter(None, \"DEBUG,RPC=ERR\")\n self.log_file = BasicParameter(None, \"daos_server.log\")\n self.env_vars = BasicParameter(\n None,\n [\"ABT_ENV_MAX_NUM_XSTREAMS=100\",\n \"ABT_MAX_NUM_XSTREAMS=100\",\n \"DAOS_MD_CAP=1024\",\n \"CRT_CTX_SHARE_ADDR=0\",\n \"CRT_TIMEOUT=30\",\n \"FI_SOCKETS_MAX_CONN_RETRY=1\",\n \"FI_SOCKETS_CONN_TIMEOUT=2000\",\n \"DD_MASK=mgmt,io,md,epc,rebuild\"]\n )\n\n # Storage definition parameters:\n #\n # When scm_class is set to ram, tmpfs will be used to emulate SCM.\n # scm_mount: /mnt/daos - map to -s /mnt/daos\n # scm_class: ram\n # scm_size: 6 - size in GB units\n #\n # When scm_class is set to dcpm, scm_list is the list of device\n # paths for AppDirect pmem namespaces (currently only one per\n # server supported).\n # scm_class: dcpm\n # scm_list: [/dev/pmem0]\n #\n # If using NVMe SSD (will write /mnt/daos/daos_nvme.conf and start\n # I/O service with -n <path>)\n # bdev_class: nvme\n # bdev_list: [\"0000:81:00.0\"] - generate regular nvme.conf\n #\n # If emulating NVMe SSD with malloc devices\n # bdev_class: malloc - map to VOS_BDEV_CLASS=MALLOC\n # bdev_size: 4 - malloc size of each device in GB.\n # bdev_number: 1 - generate nvme.conf as follows:\n # [Malloc]\n # NumberOfLuns 1\n # LunSizeInMB 4000\n #\n # If emulating NVMe SSD over kernel block device\n # bdev_class: kdev - map to VOS_BDEV_CLASS=AIO\n # bdev_list: [/dev/sdc] - generate nvme.conf as follows:\n # [AIO]\n # AIO /dev/sdc AIO2\n #\n # If emulating NVMe SSD with backend file\n # bdev_class: file - map to VOS_BDEV_CLASS=AIO\n # bdev_size: 16 - file size in GB. Create file if\n # it does not exist.\n # bdev_list: [/tmp/daos-bdev] - generate nvme.conf as follows:\n # [AIO]\n # AIO /tmp/aiofile AIO1 4096\n self.scm_mount = BasicParameter(None, \"/mnt/daos\")\n self.scm_class = BasicParameter(None, \"ram\")\n self.scm_size = BasicParameter(None, 16)\n self.scm_list = BasicParameter(None)\n self.bdev_class = BasicParameter(None)\n self.bdev_list = BasicParameter(None)\n self.bdev_size = BasicParameter(None)\n self.bdev_number = BasicParameter(None)",
"def main():\n parser = argparse.ArgumentParser(\n description=\"Evacuate a neutron l3-agent\")\n parser.add_argument('-f', '--from-l3agent', help='l3agent uuid', required=True)\n parser.add_argument('-t', '--to-l3agent', help='l3agent uuid', required=True)\n parser.add_argument('-r', '--router', help='specific router')\n parser.add_argument('-l', '--limit', help='max number of routers to migrate')\n parser.add_argument('-v', '--verbose', help='verbose', action='store_true')\n args = parser.parse_args()\n\n # get OS_* environment variables\n os_auth_url = get_environ('OS_AUTH_URL', args.verbose)\n os_username = get_environ('OS_USERNAME', args.verbose)\n os_password = get_environ('OS_PASSWORD', args.verbose)\n os_tenant_name = get_environ('OS_TENANT_NAME', args.verbose)\n os_region_name = get_environ('OS_REGION_NAME', args.verbose)\n\n\n api = openstackapi.OpenstackAPI(os_auth_url, os_username, os_password, os_project_name=os_tenant_name)\n if args.limit:\n limit=int(args.limit)\n else:\n limit = 0\n\n #Validate agent's UUID\n validateargs(api, os_region_name, args.from_l3agent, args.to_l3agent, args.router)\n\n if args.router:\n moverouter(api, os_region_name, args.from_l3agent, args.to_l3agent, args.router)\n else:\n evacuate_l3_agent(api, os_region_name, args.from_l3agent, args.to_l3agent, limit)",
"def main():\n utils.vip_main(actuator_agent, identity='platform.d.actuator')",
"def __init__(self, *args, **kwargs):\n AgentModule.__init__(self, *args, **kwargs)\n self.name = 'MonitorAgents'\n self.setup = \"Production\"\n self.enabled = False\n self.restartAgents = False\n self.restartExecutors = False\n self.restartServices = False\n self.controlComponents = False\n self.commitURLs = False\n self.diracLocation = \"/opt/dirac/pro\"\n\n self.sysAdminClient = SystemAdministratorClient(socket.gethostname())\n self.jobMonClient = JobMonitoringClient()\n self.nClient = NotificationClient()\n self.csAPI = None\n self.agents = dict()\n self.executors = dict()\n self.services = dict()\n self.errors = list()\n self.accounting = defaultdict(dict)\n\n self.addressTo = [\"[email protected]\"]\n self.addressFrom = \"[email protected]\"\n self.emailSubject = \"MonitorAgents on %s\" % socket.gethostname()",
"async def test_dbus_osagent(\n os_agent_service: OSAgentService, dbus_session_bus: MessageBus\n):\n os_agent = OSAgent()\n\n assert os_agent.version is None\n assert os_agent.diagnostics is None\n\n await os_agent.connect(dbus_session_bus)\n\n assert os_agent.version == \"1.1.0\"\n assert os_agent.diagnostics\n\n os_agent_service.emit_properties_changed({\"Diagnostics\": False})\n await os_agent_service.ping()\n assert os_agent.diagnostics is False\n\n os_agent_service.emit_properties_changed({}, [\"Diagnostics\"])\n await os_agent_service.ping()\n await os_agent_service.ping()\n assert os_agent.diagnostics is True",
"def describe_agent_versions(StackId=None, ConfigurationManager=None):\n pass",
"def makeService_Agent(self, options):\n\n # Don't use memcached initially -- calendar server might take it away\n # at any moment. However, when we run a command through the gateway,\n # it will conditionally set ClientEnabled at that time.\n def agentPostUpdateHook(configDict, reloading=False):\n configDict.Memcached.Pools.Default.ClientEnabled = False\n\n config.addPostUpdateHooks((agentPostUpdateHook,))\n config.reload()\n\n # Verify that server root actually exists and is not phantom\n checkDirectory(\n config.ServerRoot,\n \"Server root\",\n access=W_OK,\n wait=True # Wait in a loop until ServerRoot exists and is not phantom\n )\n\n # These we need to set in order to open the store\n config.EnableCalDAV = config.EnableCardDAV = True\n\n def agentServiceCreator(pool, store, ignored, storageService):\n from calendarserver.tools.agent import makeAgentService\n if storageService is not None:\n # Shut down if DataRoot becomes unavailable\n from twisted.internet import reactor\n dataStoreWatcher = DirectoryChangeListener(\n reactor,\n config.DataRoot,\n DataStoreMonitor(reactor, storageService)\n )\n dataStoreWatcher.startListening()\n if store is not None:\n store.queuer = NonPerformingQueuer()\n return makeAgentService(store)\n\n uid, gid = getSystemIDs(config.UserName, config.GroupName)\n svc = self.storageService(\n agentServiceCreator, None, uid=uid, gid=gid\n )\n agentLoggingService = ErrorLoggingMultiService(\n config.ErrorLogEnabled,\n config.AgentLogFile,\n config.ErrorLogRotateMB * 1024 * 1024,\n config.ErrorLogMaxRotatedFiles,\n config.ErrorLogRotateOnStart,\n )\n svc.setName(\"agent\")\n svc.setServiceParent(agentLoggingService)\n return agentLoggingService",
"def main():\n utils.vip_main(ahu_agent, version=__version__)",
"def __init__( self, conn, addr, server, version ):",
"def main():\n client = axiothea_client.Client()\n server = axiothea_server.Server()\n edge_server = axiothea_server.Server\n edge_client = axiothea_edge.Client\n server.run(client, edge_server, edge_client)",
"def __init__(self, session_name=\"ios.0/0/CPU0\"):\n args = shlex.split(\n 'script -c \"lboot -cqm {} -- {}\" /dev/null'.format(\n session_name,\n _JSON_RPC_SERVER_PATH))\n super(EnXRTransport, self).__init__(args)",
"def __init__( self, agentName, loadName, baseAgentName = False, properties = {} ):\n if baseAgentName and agentName == baseAgentName:\n self.log = gLogger\n standaloneModule = True\n else:\n self.log = gLogger.getSubLogger( agentName, child = False )\n standaloneModule = False\n\n self.__basePath = gConfig.getValue( '/LocalSite/InstancePath', rootPath )\n self.__agentModule = None\n self.__codeProperties = {}\n self.__getCodeInfo()\n\n self.__moduleProperties = { 'fullName' : agentName,\n 'loadName' : loadName,\n 'section' : PathFinder.getAgentSection( agentName ),\n 'loadSection' : PathFinder.getAgentSection( loadName ),\n 'standalone' : standaloneModule,\n 'cyclesDone' : 0,\n 'totalElapsedTime' : 0,\n 'setup' : gConfig.getValue( \"/DIRAC/Setup\", \"Unknown\" ),\n 'alive' : True }\n self.__moduleProperties[ 'system' ], self.__moduleProperties[ 'agentName' ] = agentName.split( \"/\" )\n self.__configDefaults = {}\n self.__configDefaults[ 'MonitoringEnabled'] = True\n self.__configDefaults[ 'Enabled'] = self.am_getOption( \"Status\", \"Active\" ).lower() in ( 'active' )\n self.__configDefaults[ 'PollingTime'] = self.am_getOption( \"PollingTime\", 120 )\n self.__configDefaults[ 'MaxCycles'] = self.am_getOption( \"MaxCycles\", 500 )\n self.__configDefaults[ 'ControlDirectory' ] = os.path.join( self.__basePath,\n 'control',\n *agentName.split( \"/\" ) )\n self.__configDefaults[ 'WorkDirectory' ] = os.path.join( self.__basePath,\n 'work',\n *agentName.split( \"/\" ) )\n self.__configDefaults[ 'shifterProxy' ] = ''\n self.__configDefaults[ 'shifterProxyLocation' ] = os.path.join( self.__configDefaults[ 'WorkDirectory' ],\n '.shifterCred' )\n\n\n if type( properties ) == types.DictType:\n for key in properties:\n self.__moduleProperties[ key ] = properties[ key ]\n self.__moduleProperties[ 'executors' ] = [ ( self.execute, () ) ]\n self.__moduleProperties[ 'shifterProxy' ] = False\n\n self.__monitorLastStatsUpdate = -1\n self.monitor = None\n self.__initializeMonitor()\n self.__initialized = False",
"def server_plugin():",
"def agent_init(self):\n pass",
"def server_version(self, timeout):\n _abstract()",
"def server_version(self, timeout):\n _abstract()",
"def test_unix_client_system_connection(core_session, agent_enrolled_unix_system_with_users, proxy_start_stop):\n\n \"\"\"\n Testrail Link:\n https://testrail.centrify.com/index.php?/cases/view/1293084\n https://testrail.centrify.com/index.php?/cases/view/1293085\n https://testrail.centrify.com/index.php?/cases/view/1293086\n \"\"\"\n\n # verfiy the test is run with single thread.\n assert 'PYTEST_XDIST_WORKER_COUNT' not in os.environ, \\\n f'This test cannot be run with multiple threads due to starting and stopping connectors'\n\n enrolledsystems = agent_enrolled_unix_system_with_users\n session = enrolledsystems[0][\"Session\"]\n resourceid = enrolledsystems[0][\"ResourceId\"]\n proxyid = enrolledsystems[0][\"ProxyId\"]\n proxycontrol = proxy_start_stop\n\n logger.info(\"stop the agent\")\n ssh_manager.ssh_stop_agent(session)\n logger.info(\"start the connector\")\n proxycontrol(proxyid, True)\n\n logger.info(\"Testing connection to the computer, Connector is ready\")\n result, success = ResourceManager.get_system_health(core_session, resourceid)\n assert success and result == 'OK', f\"Unable to verify system is reachable {result} {success}\"\n\n # stop Conector , Should fail\n logger.info(\"Stopping the connector\")\n proxycontrol(proxyid, False)\n logger.info(\"Testing connection to the system\")\n result, success = ResourceManager.get_system_health(core_session, resourceid)\n assert success and result != 'OK', f\"cerify system is reachable {result} {success}\"\n\n # Start agent\n logger.info(\"Starting the agent\")\n ssh_manager.ssh_start_agent(session, True)\n logger.info(\"Testing connection to the computer, agent is available.\")\n result, success = ResourceManager.get_system_health(core_session, resourceid)\n assert success and result == 'OK', f\"Unable to verify system is reachable {result} {success}\"\n\n # verify account again, both connector and agent are running \n proxycontrol(proxyid, True)\n logger.info(\"Testing connection to the computer, both agent and connector are available\")\n result, success = ResourceManager.get_system_health(core_session, resourceid)\n assert success and result == 'OK', f\"Unable to verify system is reachable {result} {success}\"",
"def __init__(\n self,\n node_ip_address,\n dashboard_agent_port,\n gcs_address,\n minimal,\n metrics_export_port=None,\n node_manager_port=None,\n listen_port=ray_constants.DEFAULT_DASHBOARD_AGENT_LISTEN_PORT,\n disable_metrics_collection: bool = False,\n *, # the following are required kwargs\n object_store_name: str,\n raylet_name: str,\n log_dir: str,\n temp_dir: str,\n session_dir: str,\n logging_params: dict,\n agent_id: int,\n session_name: str,\n ):\n # Public attributes are accessible for all agent modules.\n self.ip = node_ip_address\n self.minimal = minimal\n\n assert gcs_address is not None\n self.gcs_address = gcs_address\n\n self.temp_dir = temp_dir\n self.session_dir = session_dir\n self.log_dir = log_dir\n self.dashboard_agent_port = dashboard_agent_port\n self.metrics_export_port = metrics_export_port\n self.node_manager_port = node_manager_port\n self.listen_port = listen_port\n self.object_store_name = object_store_name\n self.raylet_name = raylet_name\n self.logging_params = logging_params\n self.node_id = os.environ[\"RAY_NODE_ID\"]\n self.metrics_collection_disabled = disable_metrics_collection\n self.agent_id = agent_id\n self.session_name = session_name\n # TODO(edoakes): RAY_RAYLET_PID isn't properly set on Windows. This is\n # only used for fate-sharing with the raylet and we need a different\n # fate-sharing mechanism for Windows anyways.\n if sys.platform not in [\"win32\", \"cygwin\"]:\n self.ppid = int(os.environ[\"RAY_RAYLET_PID\"])\n assert self.ppid > 0\n logger.info(\"Parent pid is %s\", self.ppid)\n\n # grpc server is None in mininal.\n self.server = None\n # http_server is None in minimal.\n self.http_server = None\n\n # Used by the agent and sub-modules.\n # TODO(architkulkarni): Remove gcs_client once the agent exclusively uses\n # gcs_aio_client and not gcs_client.\n self.gcs_client = GcsClient(address=self.gcs_address)\n _initialize_internal_kv(self.gcs_client)\n assert _internal_kv_initialized()\n self.gcs_aio_client = GcsAioClient(address=self.gcs_address)\n\n if not self.minimal:\n self._init_non_minimal()",
"def test_can_get_agentlist(pa_instance):\n wrapper, agent_uuid = pa_instance\n publickey, secretkey = get_new_keypair()\n\n agent = wrapper.build_agent(\n serverkey=wrapper.publickey, publickey=publickey, secretkey=secretkey)\n peers = agent.vip.peerlist().get(timeout=2)\n assert VOLTTRON_CENTRAL_PLATFORM in peers\n\n # Make a call to manage which should return to us the publickey of the\n # platform.agent on the instance.\n papublickey = agent.vip.rpc.call(\n VOLTTRON_CENTRAL_PLATFORM, 'manage', wrapper.vip_address,\n wrapper.publickey, agent.core.publickey).get(timeout=2)\n assert papublickey\n\n agentlist = agent.vip.rpc.call(\n VOLTTRON_CENTRAL_PLATFORM, \"list_agents\"\n ).get(timeout=2)\n\n assert isinstance(agentlist, list)\n assert len(agentlist) == 1\n retagent = agentlist[0]\n assert retagent['uuid'] == agent_uuid\n checkkeys = ('process_id', 'error_code', 'is_running', 'permissions',\n 'health')\n for k in checkkeys:\n assert k in retagent.keys()\n\n # make sure can stop is determined to be false\n assert retagent['permissions']['can_stop'] == False",
"def main():\n compare_versions(Reactome())",
"def connect_to_master():",
"def detect_arangosh_instances(self, config, old_version):\n if self.arangosh is None:\n config.port = self.get_frontend_port()\n config.passvoid = self.passvoid\n self.arangosh = ArangoshExecutor(config, self.get_frontend(), old_version)\n self.arango_importer = ArangoImportExecutor(config, self.get_frontend())\n self.arango_restore = ArangoRestoreExecutor(config, self.get_frontend())\n if config.hot_backup_supported:\n self.hb_instance = HotBackupManager(\n config,\n self.raw_basedir,\n config.base_test_dir / self.raw_basedir,\n self.get_frontend(),\n )\n self.hb_config = HotBackupConfig(\n config,\n self.raw_basedir,\n config.base_test_dir / self.raw_basedir,\n )",
"def daos_ver_after_upgraded(self, host):\n cmds = [\n \"daos version\",\n \"dmg version\",\n \"daos pool query {}\".format(self.pool.identifier)]\n for cmd in cmds:\n self.log.info(\"==cmd= %s\", cmd)\n result = pcmd(host, cmd, False)\n if 0 not in result or len(result) > 1:\n failed = []\n for item, value in list(result.items()):\n if item != 0:\n failed.extend(value)\n raise CommandFailure(\"##Error occurred running '{}' on {}\".format(\n cmd, host))\n self.log.info(\"==>%s result= %s\", cmd, result)",
"def __init__(self, args:argparse.Namespace):\n\t\tglobal DISTRO\n\n\t\tself.dispersion = args.dispersion if args.dispersion > 0 else 0\n\t\tself.login_dispersion = args.login_dispersion if args.login_dispersion > 0 else 0\n\t\tself.wait_for_parents = bool(args.wait_for_parents)\n\t\tself.retries = args.retries if args.retries > 0 else 0\n\t\tself.rev_proxy_disable = args.rev_proxy_disable\n\t\tself.verify = not args.insecure\n\n\t\tsetLogLevel(args.log_level)\n\n\t\tlogging.info(\"Distribution detected as: '%s'\", DISTRO)\n\n\t\tself.hostname = (platform.node().split('.')[0], platform.node())\n\t\tlogging.info(\"Hostname detected as: '%s'\", self.fullHostname)\n\n\t\ttry:\n\t\t\tself.mode = Configuration.Modes[args.Mode.upper()]\n\t\texcept KeyError as e:\n\t\t\traise ValueError(\"Unrecognized Mode: '%s'\" % args.Mode)\n\n\t\tself.tsroot = parseTSRoot(args.ts_root)\n\t\tlogging.info(\"ATS root installation directory set to '%s'\", self.tsroot)\n\n\t\tself.useSSL, self.toHost, self.toPort = parseTOURL(args.to_url, self.verify)\n\t\tself.username, self.password = args.to_user, args.to_password",
"def main():\n utils.vip_main(peakShaverAgent, \n version=__version__)",
"def main():\n\n # Handling arguments\n args = get_args()\n debug = args.debug\n log_file = None\n if args.logfile:\n log_file = args.logfile\n nuage_enterprise = args.nuage_enterprise\n nuage_host = args.nuage_host\n nuage_port = args.nuage_port\n nuage_password = None\n if args.nuage_password:\n nuage_password = args.nuage_password\n nuage_username = args.nuage_username\n# nosslcheck = args.nosslcheck\n verbose = args.verbose\n fip_net = args.fip_net\n uplink_addr = args.uplink_addr\n uplink_mask = args.uplink_mask\n uplink_gw = args.uplink_gw\n uplink_ip = args.uplink_ip\n uplink_mac = args.uplink_mac\n gw_name = args.gw_name\n gw_port = args.gw_port\n gw_vlan = args.gw_vlan\n\n # Logging settings\n if debug:\n log_level = logging.DEBUG\n elif verbose:\n log_level = logging.INFO\n else:\n log_level = logging.WARNING\n\n logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s %(message)s', level=log_level)\n logger = logging.getLogger(__name__)\n\n # Getting user password for Nuage connection\n if nuage_password is None:\n logger.debug('No command line Nuage password received, requesting Nuage password from user')\n nuage_password = getpass.getpass(prompt='Enter password for Nuage host %s for user %s: ' % (nuage_host, nuage_username))\n\n try:\n # Connecting to Nuage\n logger.info('Connecting to Nuage server %s:%s with username %s' % (nuage_host, nuage_port, nuage_username))\n nc = vsdk.NUVSDSession(username=nuage_username, password=nuage_password, enterprise=nuage_enterprise, api_url=\"https://%s:%s\" % (nuage_host, nuage_port))\n nc.start()\n\n except Exception as e:\n logger.error('Could not connect to Nuage host %s with user %s and specified password' % (nuage_host, nuage_username))\n logger.critical('Caught exception: %s' % str(e))\n return 1\n\n nuage_user = nc.user\n\n\n # Getting the parentID of FIP subnet\n logger.debug('Getting FIP subnet parent ID')\n fip_obj = nuage_user.subnets.get_first(filter=\"address == '{0}'\".format(fip_net))\n \n # Fail if FIP subnet object was not found\n if not fip_obj:\n logger.critical('FIP subnet {0} was not found'.format(fip_net))\n return 1\n\n shared_resource_id = fip_obj.parent_id\n logger.debug('FIP parent ID is: {0}'.format(shared_resource_id))\n\n\n # Locating a gateway port and creating a new VLAN\n logger.debug('Creating a new VLAN on Gateway port')\n new_vlan = vsdk.NUVLAN(value=gw_vlan)\n gw = nuage_user.gateways.get_first(filter=\"name == '{0}'\".format(gw_name))\n\n # Fail if Gateway was not found\n if not gw:\n logger.critical('Gateway {0} was not found'.format(gw_name))\n return 1\n\n port = gw.ports.get_first(filter=\"name == '{0}'\".format(gw_port))\n\n # Fail if Port requirements are not met\n if not port:\n logger.critical('Port {0} was not found on Gateway {1}'.format(gw_port, gw_name))\n return 1\n elif not port.port_type == 'ACCESS':\n logger.critical('Port {0} is not an ACCESS port type'.format(gw_port))\n return 1\n elif not int(gw_vlan) in range(*[int(x) for x in port.vlan_range.split('-')]):\n logger.critical('Vlan {0} is not part of the port vlan range: {1}'.format(gw_vlan, port.vlan_range))\n return 1\n elif port.vlans.get_first(filter=\"value == {0}\".format(gw_vlan)):\n logger.critical('Vlan {0} already exists on port {1}'.format(gw_vlan, gw_port))\n return 1\n\n port.create_child(new_vlan)\n vlan_id = new_vlan.id\n logger.debug('New VLAN ID is: {0}'.format(vlan_id))\n\n\n # Constructing an Uplink Subnet object\n logger.debug('Creating an Uplink Subnet')\n shared_subnet = 
vsdk.NUSharedNetworkResource(name='uplink subnet {0}'.format(uplink_addr.replace('.','-')), \\\n description='Uplink subnet to Gateway {0}'.format(gw_name.replace('.','-')), \\\n address=uplink_addr, \\\n netmask=uplink_mask, \\\n gateway=uplink_gw, \\\n type=UPLINK_TYPE, \\\n uplink_interface_ip=uplink_ip, \\\n uplink_interface_mac=uplink_mac, \\\n uplink_gw_vlan_attachment_id=vlan_id, \\\n shared_resource_parent_id=shared_resource_id, \\\n uplink_vport_name = 'uplink vport {0} Vlan{1}'.format(gw_port, gw_vlan))\n\n # Creating a subnet on VSD\n nuage_user.create_child(shared_subnet)\n\n logger.info('Uplink Subnet is created')\n return 0"
] | [
"0.68752897",
"0.55359006",
"0.5426499",
"0.5346168",
"0.53395516",
"0.52728474",
"0.52551156",
"0.52248836",
"0.5213356",
"0.5169228",
"0.5133192",
"0.51267165",
"0.51057464",
"0.5069011",
"0.50680554",
"0.50431585",
"0.50287765",
"0.5025044",
"0.49971265",
"0.49971265",
"0.49772736",
"0.49476212",
"0.48730898",
"0.4861459",
"0.4860002",
"0.48579806",
"0.4857506",
"0.4829285",
"0.48253983",
"0.48249036"
] | 0.64597714 | 1 |
Get the mode of a categorical distribution and cast it to one-hot. | def one_hot_categorical_mode(logits):
dist = tfd.Categorical(logits)
return tf.cast(tf.one_hot(dist.mode(), dist.event_size), tf.float32) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_categorical(x, n_col=None):\n if not n_col:\n n_col = np.amax(x) + 1\n one_hot = np.zeros((x.shape[0], n_col))\n one_hot[np.arange(x.shape[0]), x] = 1\n return one_hot",
"def to_categorical(target: np.ndarray, n_classes: int = None) -> np.ndarray:\n\tn_classes = n_classes if n_classes is not None else np.max(target) + 1\n\tbatch_size = target.shape[0]\n\tone_hot = np.zeros((batch_size, n_classes))\n\tone_hot[np.arange(batch_size), target] = 1\n\treturn one_hot",
"def one_hot(labels, classes=None):\n return K.utils.to_categorical(labels, classes)",
"def _onehot(y, n_classes=False):\n if not n_classes:\n \"\"\"Create one-hot encoded labels.\"\"\"\n n_classes = len(set(y))\n out = np.zeros((len(y), n_classes))\n for i, ii in enumerate(y):\n out[i][ii] += 1\n y_onehot = out.astype(int)\n return y_onehot",
"def one_hot(y_):\n y_ = y_.reshape(len(y_))\n n_values = int(np.max(y_)) + 1\n n_values = 6\n return np.eye(n_values)[np.array(y_, dtype=np.int32)] # Returns FLOATS",
"def one_hot_encode(df, col):\n return pd.get_dummies(df, columns=[col], drop_first=True)",
"def To1hot(label,num_class):\n onehot = np.zeros(num_class)\n onehot[label] = 1\n return onehot",
"def from_one_hot_to_category_indices(data: ndarray):\n return np.argmax(data, axis=-1)",
"def one_hot_converter(column):\n # encode class values as integers\n encoder = LabelEncoder()\n encoder.fit(column) \n encoded_ = encoder.transform(column)\n # convert integers to dummy variables, i.e., one-hot encoded\n encoded_column = to_categorical(encoded_)\n \n return encoded_column",
"def one_hot(y, num_classes):\n return np.eye(num_classes)[y]",
"def label_to_one_hot(label, num_of_class=2):\r\n import numpy as np\r\n one_hot = np.zeros((len(label), num_of_class), dtype=np.uint8)\r\n for i in range(len(label)):\r\n one_hot[i, int(label[i] - 1)] = 1 # label is 1 and 2\r\n\r\n return one_hot",
"def one_hot(x, num_classes, dtype=jnp.float32):\n return jax.nn.one_hot(x, num_classes).astype(dtype)",
"def to_categorical(y, num_classes):\n\treturn np.eye(num_classes, dtype='uint8')[y]",
"def one_hot(labels, classes=None):\n\n one_hot_ = K.utils.to_categorical(labels, classes)\n return(one_hot_)",
"def convert_to_one_hot(a):\n a = a[:, 0]\n a = a.astype(int)\n A = np.zeros((len(a), config.num_classes))\n A[np.arange(len(a)), a] = 1\n return A",
"def process_categorical_data(data_df):\n return pd.get_dummies(data_df, columns=Columns.categorical)",
"def to_categorical(y, num_classes):\n return np.eye(num_classes, dtype=\"uint8\")[y]",
"def to_categorical(y, num_classes):\n return np.eye(num_classes, dtype='uint8')[y]",
"def to_categorical(y, num_classes):\n return np.eye(num_classes, dtype='uint8')[y]",
"def to_categorical(y, num_classes):\n return np.eye(num_classes, dtype='uint8')[y]",
"def to_categorical(y, num_classes):\n return np.eye(num_classes, dtype='uint8')[y]",
"def to_categorical(y, num_classes):\n return np.eye(num_classes, dtype='uint8')[y]",
"def one_hot_encode(self, meta_field):\n one_hot = pd.get_dummies(self.sample_meta[meta_field]).values\n return one_hot",
"def one_hot_encoder(df, nan_as_category=True):\n original_columns = list(df.columns)\n categorical_columns = [col for col in df.columns if\n df[col].dtype == 'object']\n df = pd.get_dummies(df, columns=categorical_columns,\n dummy_na=nan_as_category)\n new_columns = [c for c in df.columns if c not in original_columns]\n return df, new_columns",
"def convert_to_one_hot(a,max_val=None):\n N = a.size\n data = np.ones(N,dtype=int)\n sparse_out = sparse.coo_matrix((data,(np.arange(N),a.ravel())), shape=(N,max_val))\n return np.array(sparse_out.todense())",
"def _one_hot_encoder(self):\n ohe = preprocessing.OneHotEncoder()\n ohe.fit(self.dataframe[self.cat_feats])\n return ohe.transform(self.dataframe_d_copy[self.cat_feats])",
"def to_onehot(x, num_classes):\n return np.eye(num_classes, dtype='float32')[x]",
"def to_categorical(x, num_classes):\n return np.eye(num_classes, dtype='uint8')[x]",
"def to_categorical(y, num_classes):\n arr = np.eye(num_classes)[y]\n tensor = torch.LongTensor(arr)\n return autograd.Variable(tensor)",
"def labels_to_one_hot(\n labels: np.ndarray, categories: int, axis: int = 0,\n keepdims=False, dtype=bool\n):\n if keepdims:\n assert labels.shape[axis] == 1\n result_ndim = labels.ndim\n else:\n result_ndim = labels.ndim + 1\n\n if axis < 0:\n axis += result_ndim\n\n shape = labels.shape\n zeros = np.zeros((categories, labels.size), dtype=dtype)\n zeros[labels.ravel(), range(labels.size)] = 1\n\n zeros = zeros.reshape((categories,) + shape)\n\n if keepdims:\n zeros = zeros[(slice(None),) * (axis + 1) + (0,)]\n\n zeros = np.moveaxis(zeros, 0, axis)\n\n return zeros"
] | [
"0.7203805",
"0.7131759",
"0.69734716",
"0.69353926",
"0.6869692",
"0.68215334",
"0.6793382",
"0.6789208",
"0.67846286",
"0.67513657",
"0.67081213",
"0.66956395",
"0.6684978",
"0.6679585",
"0.6672036",
"0.6661204",
"0.6650561",
"0.66481876",
"0.66481876",
"0.66481876",
"0.66481876",
"0.66481876",
"0.66232896",
"0.6592918",
"0.6581756",
"0.6578454",
"0.65738225",
"0.6570303",
"0.65669376",
"0.65476507"
] | 0.77782106 | 0 |
Given sys.argv as a list of strings, this method returns the sublist right after the '--' element (if present; otherwise it returns an empty list). | def _get_argv_after_doubledash(self):
try:
idx = sys.argv.index("--")
return sys.argv[idx+1:] # the list after '--'
except ValueError as e: # '--' not in the list:
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_argv_list(args):\n # parse the string format of arguments and return a list of arguments\n argv = args.split(' ')\n if len(argv) == 1 and argv[0] == '':\n return []\n return argv",
"def pop_first_arg(argv):\n for arg in argv:\n if not arg.startswith('-'):\n argv.remove(arg)\n return (arg, argv)\n\n return (None, argv)",
"def input_args():\n return filter(lambda x: len(x) > 0,\n map(lambda x: x.strip(), sys.argv[1:]))",
"def arglist(self) -> List:\n return self.argv[1:]",
"def extract_option(prefix, args):\n if prefix in ('#',):\n unique = False\n else:\n unique = True\n value = [a for a in args if a.startswith(prefix)]\n if len(value) == 1:\n value = value[0]\n args.remove(value)\n value = value[1:]\n if not unique:\n return [value]\n return value\n elif len(value) > 1 and unique:\n print('More than one %s found in args' % prefix)\n sys.exit(1)\n elif len(value) > 1 and not unique:\n for v in value:\n if v in args:\n args.remove(v)\n return [v[1:] for v in value]\n return None",
"def split_args(args):\n double_dash_pos = [i for i, x in enumerate(args) if x == '--']\n if not double_dash_pos:\n return (args, [])\n else:\n double_dash_pos = double_dash_pos[0]\n return (args[:double_dash_pos], args[double_dash_pos+1:])",
"def _get_input_from_argv():\n payload_index = sys.argv.index('--') + 1\n params = sys.argv[payload_index:]\n if not params:\n raise ValueError(\n \"A JSON payload was expected after the -- delimiter, but none \"\n \"was found.\")\n return ' '.join(params)",
"def argv(self) -> List[str]:\n if self.command:\n rtn = [utils.strip_quotes(self.command)]\n for cur_token in self.arg_list:\n rtn.append(utils.strip_quotes(cur_token))\n else:\n rtn = []\n\n return rtn",
"def cut_off_rest(arg):\n return arg.split(' : ')[0]",
"def remaining_args(opts_plus_args, arg_list):\n pattern = '\\s+'.join(re.escape(a) for a in arg_list) + '\\s*$'\n matchObj = re.search(pattern, opts_plus_args)\n try:\n remaining = opts_plus_args[matchObj.start():]\n except AttributeError:\n # Don't preserve spacing, but at least we don't crash and we do preserve args and their order\n remaining = ' '.join(arg_list)\n\n return remaining",
"def remaining_args(opts_plus_args, arg_list):\n pattern = '\\s+'.join(re.escape(a) for a in arg_list) + '\\s*$'\n match_obj = re.search(pattern, opts_plus_args)\n try:\n remaining = opts_plus_args[match_obj.start():]\n except AttributeError:\n # Don't preserve spacing, but at least we don't crash and we do preserve args and their order\n remaining = ' '.join(arg_list)\n\n return remaining",
"def process_command_line_input():\n\n input_args = sys.argv\n if input_args[0].find('ipython') >= 0:\n input_args = list()\n else:\n input_args.pop(0)\n\n return input_args",
"def argumentSplit(s, nargs, pad=True):\n\tif s:\n\t\ts = newshlex(StringIO(s)) # use non-C StringIO for (somewhat) unicode support?\n\t\ti = 0\n\t\targs = []\n\t\twhile (i < nargs -1) or nargs == -1: # allows to split entire string\n\t\t\ttok = s.get_token()\n\t\t\tif not tok: break\n\t\t\targs.append(tok)\n\t\t\ti += 1\n\t\trest = s.instream.read().strip() \t#TODO: should this really be stripping here? Without strip:\n\t\tif rest:\t\t\t\t\t\t\t# >>> argumentSplit('one \"two three\" four', 3)\n\t\t\targs.append(rest)\t\t\t\t# ['one', 'two three', ' four']\n\t\t\ti += 1\n\t\tif pad:\n\t\t\twhile i < nargs:\n\t\t\t\targs.append(None)\n\t\t\t\ti += 1\n\t\treturn args\n\telse:\n\t\tif pad: return [None]*nargs\n\t\telse: return ()",
"def first_last_item(input_list: list) -> list:\n\n if len(input_list) > 1:\n return [input_list[0], input_list[-1]]\n else:\n return []",
"def parse_forest_args(argv=None):\n if argv is None:\n argv = sys.argv\n if \"bokeh\" in os.path.basename(argv[0]):\n i = argv.index(\"--args\")\n return argv[i + 1 :]\n else:\n _, argv = forest.cli.main.parse_args(argv)\n return argv[1:]",
"def cfgPathToList( arg ):\n from types import StringTypes\n listPath = []\n if type( arg ) not in StringTypes:\n return listPath\n while arg.find( '/' ) == 0:\n arg = arg[1:]\n return arg.split( '/' )",
"def process_list_arg(arg):\n if isinstance(arg, list):\n return arg\n elif isinstance(arg, basestring):\n args = []\n for part in arg.split(\",\"):\n args.append(part.strip())\n return args",
"def read_list(name, default=NO_ARGUMENT, separator=\",\"):\n value = os.environ.get(name)\n if value is None:\n if default is NO_ARGUMENT:\n return []\n else:\n return default\n return [v.strip() for v in value.split(separator) if v.strip()]",
"def get_args(self) -> List[str]:\n return self.content.split()[1:]",
"def get_commandline_argument(argument: str, argument_list: list) -> str:\n length = len(argument_list)\n if length == 0:\n print('get_commandline_argument(): Error, empty argument_list passed, exiting.')\n exit(1)\n\n if str(argument) == '':\n # No argument passed.\n return ''\n\n if length == 1:\n # The argument list contains the script name only.\n return ''\n\n for i in range(1, length - 1):\n if str(argument_list[i]) == str(argument):\n if i + 1 <= length:\n # Only get the next index if we are still in the array bounds.\n return str(argument_list[i + 1])\n return ''",
"def get_args(self):\n req_argv = self._ptr.contents.argv\n args = []\n if bool(req_argv):\n i = 0\n while 1:\n s = bytestostr(req_argv[i])\n i += 1\n if s == None:\n break\n args.append(s)\n return args",
"def split_cmdline_filter_items(string):\n filter_items = string.split(',')\n return filter_items",
"def _get_parameter_list(self, raw_command): # pylint: disable=no-self-use\n contents = raw_command.split(' ')\n return [item for item in contents if item.startswith('-')]",
"def parse_optional_file_list_from_args(args_list: Any, append_error_func: Callable[[str], None]) -> List[str]:\n results = [] # type: List[str]\n if args_list is None:\n # No arguments\n pass\n elif isinstance(args_list, List):\n for c in args_list:\n if not os.path.exists(c):\n append_error_func(\"Given path %s does not exist!\" % c)\n results = list(args_list)\n else:\n append_error_func(\"Argument was not a list?\")\n return results",
"def _extract_non_empty_val(param_list):\n if param_list:\n value = param_list.pop(0)\n if value != '':\n return value\n return None",
"def __arg_list(self):\n args = []\n try:\n arg = self.__arg()\n args.append(arg)\n if arg.token.endswith(\"...\"):\n return args\n\n while True:\n self.match_value(Punctuator, \",\")\n\n arg = self.__arg()\n if arg.token.endswith(\"...\"):\n return args\n\n args.append(arg)\n except ParseError:\n return args",
"def split_list(self):\n wanted_parts = self.args.ncore\n alist = glob.glob(self.args.input + '*.root')\n length = len(alist)\n return [alist[i * length // wanted_parts: (i + 1) * length // wanted_parts]\n for i in range(wanted_parts)]",
"def parse_list(entry, separator):\n r = [x for x in entry.split(separator)] if entry else None\n\n # for lists with only a single element, return just the element\n if isinstance(r, list) and len(r) == 1:\n return r[0]\n else:\n return r",
"def get_args():\n if len(sys.argv) == 3:\n return sys.argv[1:]\n print(\"USAGE: python3 extract_cds.py infile outfile\\n\\n\")\n exit()",
"def get_entry_point_command(\n entry_point: Optional[\"EntryPoint\"], parameters: Dict[str, Any]\n) -> List[str]:\n if entry_point is None:\n return []\n return entry_point.compute_command(parameters)"
] | [
"0.6772655",
"0.62367713",
"0.61961806",
"0.61672586",
"0.6163589",
"0.6071635",
"0.5893925",
"0.58778197",
"0.5852514",
"0.5829911",
"0.58101344",
"0.5773826",
"0.5649536",
"0.5600912",
"0.55947906",
"0.55472255",
"0.5522556",
"0.551393",
"0.5505596",
"0.54859275",
"0.54238886",
"0.54128456",
"0.5401597",
"0.5388908",
"0.5383195",
"0.5364",
"0.5330953",
"0.5302018",
"0.5281638",
"0.5272107"
] | 0.74402696 | 0 |
This method behaves identically to the superclass implementation, except that the sys.argv list is first preprocessed with _get_argv_after_doubledash. See the class docstring for usage examples and details. | def parse_args(self):
return super().parse_args(args=self._get_argv_after_doubledash()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_from_argv(self, argv):\r\n self.progname = argv[0]\r\n super(Command, self).run_from_argv(argv)",
"def set_argv(self, string):\n try:\n self.argv = string.split(' ')\n except AttributeError:\n if string:\n self.argv = string\n else:\n self.argv = []",
"def hotfix_deepobs_argparse():\n sys.argv = sys.argv[:1]",
"def main_argv():\n main_parse_args(sys.argv[1:])",
"def main_argv():\n main_parse_args(sys.argv[1:])",
"def _get_argv_after_doubledash(self):\n try:\n idx = sys.argv.index(\"--\")\n return sys.argv[idx+1:] # the list after '--'\n except ValueError as e: # '--' not in the list:\n return []",
"def __init__(self, argv):\n self._argv = argv",
"def _test_argv(self, verbose, extra_argv):\r\n #self.package_path = os.path.abspath(self.package_path)\r\n argv = [__file__, self.package_path]\r\n argv += ['--verbosity', str(verbose)]\r\n if extra_argv:\r\n argv += extra_argv\r\n return argv",
"def sys_argv_reset():\n\n sys.argv = sys._argv\n del sys._argv",
"def get_cli_arguments(self):\n pass",
"def _add_argument(self, args=''):\n\n sys.argv += args.split(' ')",
"def argv(args: List[str]) -> ContextManager:\n argv = sys.argv\n sys.argv = args\n try:\n yield\n finally:\n sys.argv = argv",
"def redirect_sys_argv(*argv):\r\n original = list(sys.argv)\r\n sys.argv[:] = argv\r\n try:\r\n yield\r\n finally:\r\n sys.argv[:] = original",
"def setup_method(self, method):\n sys.argv.clear()\n sys.argv.append(\"\")\n sys.argv.append(\"spam\")",
"def argv(self) -> List[str]:\n if self.command:\n rtn = [utils.strip_quotes(self.command)]\n for cur_token in self.arg_list:\n rtn.append(utils.strip_quotes(cur_token))\n else:\n rtn = []\n\n return rtn",
"def _argsForSubprocess(self) -> list[str]:\n pass",
"def run_argv(self, argv):\n\n try:\n parsed_optional, parsed_mandatory = getopt.getopt(\n argv, '', [\n '%s=' %\n a for a in self.args_optional.keys()])\n except getopt.GetoptError as e:\n log.info('%s' % (e))\n self.help()\n return\n\n # Do a safe evaluation of the mandatory arguments\n line_args_mandatory = []\n for arg in parsed_mandatory:\n try:\n value = ast.literal_eval(arg)\n except:\n # Is an invaluable, use as string\n value = arg\n\n line_args_mandatory.append(arg)\n\n # Do a safe evaluation of the optional arguments\n line_args_optional = []\n for argfield, argvalue in parsed_optional:\n try:\n value = ast.literal_eval(argvalue)\n except:\n # Is an invaluable, use as string\n value = argvalue\n\n line_args_optional.append((argfield, value))\n\n # If less mandatory arguments are passed, abort\n if len(line_args_mandatory) < len(self.args_mandatory):\n log.info(messages.generic.error_missing_arguments_s %\n (' '.join(self.args_mandatory))\n )\n self.help()\n return\n\n # If there are more argument and we expect one, join all the\n # Remaining mandatory arguments\n elif len(line_args_mandatory) > 1 and len(self.args_mandatory) == 1:\n line_args_mandatory = [ ' '.join( line_args_mandatory ) ]\n\n # Merge stored arguments with line arguments\n stored_args = self.session[self.name]['stored_args'].copy()\n args = stored_args.copy()\n\n args.update(\n dict(\n (key.strip('-'), value) for\n (key, value) in line_args_optional)\n )\n\n args.update(dict((key, line_args_mandatory.pop(0))\n for key in self.args_mandatory))\n\n # Check if argument passed to bind_to_vectors matches with\n # some vector\n vect_arg_value = args.get(self.bind_to_vectors)\n if vect_arg_value and vect_arg_value not in self.vectors.get_names():\n log.warn(messages.module.argument_s_must_be_a_vector % self.bind_to_vectors)\n return\n\n # If module status is IDLE, launch setup()\n if self.session[self.name]['status'] == Status.IDLE:\n self.session[self.name]['status'] = self.setup(args)\n\n # If module status is FAIL, return\n if self.session[self.name]['status'] == Status.FAIL:\n log.warn(messages.module.module_s_inactive % self.name)\n return\n\n # Setup() could has been stored additional args, so all the updated\n # stored arguments are applied to args\n args.update(\n dict(\n (key, value) for key, value in self.session[self.name]['stored_args'].items()\n if value != stored_args[key]\n )\n )\n\n return self.run(args)",
"def _pre_argument_parsing(self):\n pass",
"def test_parseArgs_no_fix(self):\n \n argv = sys.argv.copy()\n fix = wcManager.parseArgs(argv[1:])\n self.assertEqual(wcManager.workMode.NORMAL, fix)",
"def _get_run_script_args(self):\n raise NotImplementedError",
"def _prepare(self):\n # Customize commandline arguments\n parser = argparse.ArgumentParser()\n self.initArgumentParser(parser, defaults=self.default_binding_overrides)\n self.__options = parser.parse_args()\n self.__bindings.update(args_util.parser_args_to_bindings(self.__options))\n\n self.start_logging()",
"def setup_args(cls) -> ParlaiParser:\n # we want to later deprecate this for add_cmdline_args",
"def process_command_line_input():\n\n input_args = sys.argv\n if input_args[0].find('ipython') >= 0:\n input_args = list()\n else:\n input_args.pop(0)\n\n return input_args",
"def _parse_arguments(self, argv):\n parser = argparse.ArgumentParser()\n for section in self.config.sections():\n for key in self.config[section]:\n arg_name = '--' + key.replace(' ', '_').lower()\n parser.add_argument(arg_name)\n override_kwargs = vars(parser.parse_args(argv))\n override_kwargs = {k: v for k,\n v in override_kwargs.items() if v is not None}\n self._overwrite_with_kwargs(**override_kwargs)",
"def parse_args(self):\n #-----------------------------------------------------------------------\n #This code is based on code from the KR Toolkit by Christian Muise\n #URL: http://code.google.com/p/krtoolkit/\n try:\n argv, opts, flags = sys.argv[1:], {}, []\n while argv:\n if argv[0][0:2] == '--':\n flags.append(argv[0])\n argv = argv[1:]\n elif argv[0][0] == '-':\n opts[argv[0]] = argv[1]\n argv = argv[2:]\n else:\n raise InputException(\"Badly constructed arg: \" +argv[0])\n except IndexError:\n raise InputException(\"Badly constructed arg: \" + argv[0])\n #-----------------------------------------------------------------------\n for flag in flags:\n if flag in self.program_flags:\n vars(self)[self.program_flags[flag].var_name] = True\n if self.program_flags[flag].function:\n self.program_flags[flag].function(self)\n else:\n raise InputException(\"Invalid flag: \" + flag)\n \n if not self.quiet:\n min_width = max(len('Flags:'),\n max(map(lambda x : len(x.description),\n self.program_args.itervalues()))) + 1\n if len(flags) == 0:\n print \"{:<{}} {}\".format('Flags:', min_width,'<None>')\n else:\n print \"{:<{}} {}\".format('Flags:', min_width,\n ', '.join(filter(lambda f : f in flags,\n self.program_flags)))\n \n for arg in opts:\n if arg not in self.program_args:\n raise InputException(\"Invalid arg: \" + arg)\n \n for arg in self.program_arg_order:\n arg_def = self.program_args[arg]\n if arg not in opts:\n if arg_def.needed:\n raise InputException(\"Error needed arg is missing: \" + arg)\n vars(self)[arg_def.var_name] = arg_def.default_value\n else:\n if arg_def.validator == None:\n vars(self)[arg_def.var_name] = opts[arg]\n else:\n vars(self)[arg_def.var_name] = arg_def.validator(opts[arg],\n arg_def.validator_args)\n if not self.quiet:\n print \"{:<{}} {}\".format(arg_def.description + ':', min_width,\n vars(self)[arg_def.var_name])",
"def _parse_args(self):\n parser = argparse.ArgumentParser()\n _, args = parser.parse_known_args()\n self.args = [a for a in args if a != '']",
"def argv_cleanup(request, autouse=True, scope=\"function\"):\n\n sys._argv = sys.argv\n request.addfinalizer(argv_cleaner)",
"def test_cmdlineproc_test2():\n\n parameters = {\n \"debug\": False,\n \"disconnect\": False,\n \"executable\": \"\",\n \"executableargs\": \"\",\n \"hosts\": \"\",\n \"job\": \"\",\n \"jobname\": \"\",\n \"log\": \"\",\n \"recover\": \"\",\n \"resource\": \"\",\n \"replicates\": \"\",\n \"verbose\": False\n }\n\n commandlineargs = [\"-about\"]\n\n longbowargs = _commandlineproc(ALLLONGBOWARGS, commandlineargs, parameters)\n\n assert parameters[\"executable\"] == \"\"\n assert parameters[\"executableargs\"] == \"\"\n assert longbowargs == [\"-about\"]",
"def _input_args(self, args: List[str]):\n assert self._call is None, f\"You need to specify all inputs before calling `{self._call}`\"\n assert isinstance(args, list), f\"{args} is a {type(args)}, expected a list of strings!\"\n assert len(args) > 0, f\"Expected a non-empty argument list!\"\n assert all(isinstance(a, str) for a in args), f\"Expected a list of strings, not {[type(a) for a in args]}!\"\n # all arguments could potentially be filenames that we write to, so let's just add them\n self._write_files |= set(args)\n # add dummy argument zero\n args = [\"\"] + args\n # allocate args in memory\n arg_strings = [self._str(a, \"arg\") for a in args]\n # allocate a pointer array for argv\n self.data += [f\"argv: .word \" + \" \".join(\"0\" for _ in range(len(args)))]\n # load argc and argv\n self._args += [\"\", \"# argument count in a0\", f\"li a0, {len(args)}\"]\n self._args += [\"\", \"# load pointers to argument strings into argv\", f\"la a1, argv\"]\n for ii, aa in enumerate(arg_strings):\n self._args += [f\"la t1, {aa}\", f\"sw t1, {ii * 4}(a1)\"]",
"def main(argv=None):\n\n if argv is None:\n argv = sys.argv[1:]"
] | [
"0.6951482",
"0.677626",
"0.6738199",
"0.6713902",
"0.6713902",
"0.6705866",
"0.6659652",
"0.65899885",
"0.6431687",
"0.6429928",
"0.6400785",
"0.6399408",
"0.6386593",
"0.6369564",
"0.6331361",
"0.6318897",
"0.63100475",
"0.6309721",
"0.62780434",
"0.6223838",
"0.6214132",
"0.6162766",
"0.6143449",
"0.6138308",
"0.6117984",
"0.61160356",
"0.60908705",
"0.6081815",
"0.6071846",
"0.604534"
] | 0.77736425 | 0 |
Sets velocity vector based on direction | def set_velocity(self):
if self.direction == 'left':
self.x_vel = -2
else:
self.x_vel = 2
self.y_vel = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setVelocity(self,v):\n if v is None:\n self.v = Cartesian3DVector()\n else:\n if isinstance(v,Cartesian3DVector):\n self.v = Cartesian3DVector(v.x,v.y,v.z)\n else:\n raise CoordinateVector(\"Initializing a particle with the incorrect velocity vector type.\")",
"def velocity(self, X, Y):\n self.u = self.Vinf * np.ones_like(X)\n self.v = np.zeros_like(X)",
"def cmd_velocity(self, vn, ve, vd, heading):\n pass",
"def setVelocity(self, xvel, yvel):\n self.xvelocity = xvel\n self.yvelocity = yvel",
"def change_velocity(self, delta):\n self.velocity += delta",
"def change_velocity(self, delta):\n self.velocity += delta",
"def setVelocity(self, vel):\n self.shooterLPID.enable()\n self.shooterRPID.enable()\n\n self.shooterLPID.setSetpoint(vel)\n self.shooterRPID.setSetpoint(vel)",
"def new_velocity(self):\n self.velocity = self.vafter",
"def setVelocity(self, vfunc):\n n = 0\n if vfunc: n = vfunc.func_id()\n _cantera.wall_setVelocity(self.__wall_id, n)",
"def apply_velocity(self):\n self.position.data += self.velocity.data",
"def setVelocity(self, new_vel):\n\n self.vel = limiter(new_vel)",
"def update_velocity(self):\n # Set thruster (up/down) movement\n if self.thrusters:\n self.velocity_y -= self.gravity\n else:\n self.velocity_y += self.velocity_slowing\n\n # Set left movement\n if self.moving_left:\n self.velocity_x -= self.gravity\n else:\n if self.velocity_x < 0:\n self.velocity_x += self.velocity_slowing\n \n # Set right movement\n if self.moving_right:\n self.velocity_x += self.gravity\n else:\n if self.velocity_x > 0:\n self.velocity_x -= self.velocity_slowing",
"def set_vel(self, vel):\n vel = u.Quantity(vel, unit=u.km/u.s)\n self.vel = np.absolute(vel.value)",
"def _cb_cmd_vel(self,msg):\r\n print \"Walker velocity command received: \",msg\r\n vx=msg.linear.x\r\n vy=msg.linear.y\r\n vt=msg.angular.z\r\n self.start()\r\n self.set_desired_velocity(vx,vy,vt)",
"def set_velocity_y(self):\n self.__dy *= -(1+SPEED_UP)",
"def set_velocity(self):\n self.__dx = random.randint(1, MAX_X_SPEED)\n self.__dy = INITIAL_Y_SPEED\n if random.random() > 0.5:\n self.__dx = -self.__dx\n if random.random() > 0.5:\n self.__dy = -self.__dy",
"def setRotVelocity(self,r):\n self.rvel = r",
"def setVelocity(self, qd, qdd_base=10):\n self.set_register(REG_TYPE, SETPOINT_VELOCITY, 'int')\n self.set_register(REG_ACCELERATION, qdd_base, 'double')\n self.l2r(qd, REG_TARGET)",
"def velocity_step(self, dt, force):\r\n self.vel += dt * force / self.mass",
"def _update_vel(self) -> None:\n self.state[:, :, Boids.Attr.VEL] += self.state[:, :, Boids.Attr.ACC]\n self.state[:, :, Boids.Attr.VEL] = maglim(\n self.state[:, :, Boids.Attr.VEL], self.max_vel)",
"def calculate_velocity(self, speed):\n self.velocity.dx += math.cos(math.radians(self.angle)) * speed\n self.velocity.dy += math.sin(math.radians(self.angle)) * speed",
"def velocity(self, X, Y):\n self.u = (self.strength / (2 * np.pi) *\n (X - self.xc) / ((X - self.xc)**2 + (Y - self.yc)**2))\n self.v = (self.strength / (2 * np.pi) *\n (Y - self.yc) / ((X - self.xc)**2 + (Y - self.yc)**2))",
"def setRobotVelocity(self,vel):\n linear = vel[0]\n angular = vel[1]\n if not (self.orientation):\n # Kinematic model for differential robot.\n print(\"or 0\")\n\n wl = (linear - (WHEEL_SEPARATION / 2.) * angular) / WHEEL_DIAMETER\n wr = (linear + (WHEEL_SEPARATION / 2.) * angular) / WHEEL_DIAMETER\n\n # At input 1000, angular velocity is 1 cycle / s or 2*pi/s.\n velLeft = int(wl * BETA/(2*math.pi))\n velRight = int(wr * BETA/(2*math.pi))\n print(\"left: \" + str(velLeft))\n print(\"right: \" + str(velRight))\n self.setMotorSpeed(velRight,velLeft)\n else:\n print(\"or 1\")\n # Kinematic model for differential robot.\n wl = (linear - (WHEEL_SEPARATION / 2.) * angular) / WHEEL_DIAMETER\n wr = (linear + (WHEEL_SEPARATION / 2.) * angular) / WHEEL_DIAMETER\n\n # At input 1000, angular velocity is 1 cycle / s or 2*pi/s.\n velLeft = int(wl * BETA/(2*math.pi))\n velRight = int(wr * BETA/(2*math.pi))\n print(\"left: \" + str(velLeft))\n print(\"right: \" + str(velRight))\n self.setMotorSpeed(velRight, velLeft)",
"def update(self):\n self.velocity = [math.cos(self.angle), - math.sin(self.angle)]\n self.velocity = [self.speed * i for i in self.velocity]\n\n super().update()",
"def setDirectionTowardPoint(self, x, y, speed):\n currX = self.xcor()\n currY = self.ycor()\n # get actual vector from t to x,y\n dXactual = x - currX\n dYactual = y - currY\n\n # get the length of that vector. Can also use turtle.distance\n length = math.hypot(dXactual, dYactual)\n\n # now scale the vector\n try:\n self.dx = dXactual / length * speed\n self.dy = dYactual / length * speed\n except:\n self.dx = 0\n self.dy = 0",
"def update(self):\r\n # change in position -> velocity\r\n self.position += self.velocity\r\n # change in celocity -> acceleration\r\n self.velocity += self.acceleration\r\n \r\n # if velocity magnitude is higher than the defined limit set the velocity \r\n # magnitude to max speed\r\n if np.linalg.norm(self.velocity) > self.max_speed:\r\n self.velocity = self.velocity / np.linalg.norm(self.velocity) * self.max_speed\r\n \r\n # reset the acceleration\r\n self.acceleration = Vector(*np.zeros(2))",
"def set_ang_vel(self, otherframe, value):\n\n self._check_vector(value)\n self._check_frame(otherframe)\n self._ang_vel_dict.update({otherframe: value})\n otherframe._ang_vel_dict.update({self: -value})",
"def set_velocity_x(self):\n self.__dx *= -1",
"def step_velocity(self, force, timestep):\n self.velocity = self.velocity + (force * timestep)/self.mass",
"def cmd_vel_callback(self, msg):\n # Just store the desired velocity. The actual control runs on odometry callbacks\n v_l = msg.linear\n v_a = msg.angular\n self.v_linear_des = numpy.array([v_l.x, v_l.y, v_l.z])\n self.v_angular_des = numpy.array([v_a.x, v_a.y, v_a.z])"
] | [
"0.76582134",
"0.7481519",
"0.74589294",
"0.7403367",
"0.7303088",
"0.7303088",
"0.7276262",
"0.72136307",
"0.7208767",
"0.72049433",
"0.7171627",
"0.7148152",
"0.71405905",
"0.70739",
"0.70646393",
"0.7031484",
"0.69897455",
"0.69644165",
"0.6944296",
"0.69320303",
"0.6915458",
"0.68844056",
"0.6842323",
"0.68420625",
"0.6839891",
"0.68261206",
"0.67443967",
"0.6705521",
"0.66110986",
"0.660898"
] | 0.8153879 | 0 |
Log the changed names | def _log_changed_names(changed_names: Iterable[Tuple[str, str]]) -> None:
if not changed_names:
return
from .utils import logger
logger.warning("New names:")
for orig_name, new_name in changed_names:
logger.warning("* %r -> %r", orig_name, new_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _name_changed ( self, name ):\n self.name_last = parse_name( name )[-1]\n self.inputs_changed()",
"def test_log_update_name(self):\n log_count_init = LoggerHistory.objects.count()\n original_name = self.project.name\n self.project.name = '%s UPDATED' % self.project.name\n self.project.save()\n\n log = LoggerHistory.objects.last()\n log_count = LoggerHistory.objects.count()\n\n self.assertNotEqual(log.user, {\n 'id': str(self.user.id),\n 'display_name': self.user.display_name})\n self.assertEqual(log.project, {\n 'id': str(self.project.id),\n 'name': self.project.name})\n self.assertEqual(log.usergroup, None)\n self.assertEqual(log.category, None)\n self.assertEqual(log.field, None)\n self.assertEqual(log.location, None)\n self.assertEqual(log.observation, None)\n self.assertEqual(log.comment, None)\n self.assertEqual(log.subset, None)\n self.assertEqual(log.action, {\n 'id': 'updated',\n 'class': 'Project',\n 'field': 'name'})\n self.assertEqual(log_count, log_count_init + 1)\n history = self.project.history.get(pk=log.historical.get('id'))\n self.assertEqual(history.id, self.project.id)\n self.assertEqual(history.name, original_name)",
"def rename(self, name):\n self._name = name\n self._logger = logging.getLogger(name)\n self._logger.setLevel(self._level)",
"def audit_names_in_metadata(self):\n\n # Iterate over commits....\n for commit in self.repository.commits.values():\n for name in [ commit.committer_name, commit.author_name ]:\n # Is the name whitelisted?\n if name in self.FullNameWhitelist:\n continue\n\n # As a special case, allow the name 'GitHub' for certain repositories\n if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist:\n self.__log_warning(commit.sha1, \"Commit has username 'GitHub' (web merge of PR); allowing anyway\")\n continue\n\n # Check to see if the name contains spaces - if not - it is probably misconfigured....\n if \" \" not in name.strip():\n self.__log_failure(commit.sha1, \"Non-full name: \" + name)\n continue",
"def show_event_names(self):\n print(\"Names: {}\".format(\" \".join(self.list_unique_names())))",
"def _name_changed(self):\n self._named = True",
"def ChangeLogExample():\n # get the changelog module and time series manager\n cmgr = app.Modules.Get('ChangeLog Manager');\n tsmgr = app.Modules.Get('Time series Manager');\n\n path = '/Misc/'\n oldName = 'OldTimeseries';\n newName = 'New name';\n \n # Get the Time Series\n fullpath = path + oldName\n ts = tsmgr.TimeSeriesList.Fetch(fullpath);\n \n # rename the series\n ts.Name = newName;\n \n # make a changelog record\n # public IChangeLogEntry AddChangeLogEntry(\n # IEntity entity,\n # string source,\n # string description,\n # Object data\n # )\n cmgr.AddChangeLogEntry(ts, \n 'ChangeLogExample script', \n \"Changing the name from %s to %s\" %(oldName, newName), \n None);\n \n ## update the series - the changelog will follow automatically\n tsmgr.TimeSeriesList.Update(ts);",
"def changed_names(self, directory):\n return [os.path.split(p)[1] for p in self.changed_paths(directory)]",
"def lvar_name_changed(self, *args):\n return _ida_hexrays.Hexrays_Hooks_lvar_name_changed(self, *args)",
"def change_name(change_account):\n change_data(change_account, changed_data='name')",
"def OnRenameTimer(self):\r\n\r\n self.EditLabel(self._current, self._curColumn)",
"def userRenamed(self, oldname, newname):\n # Send messasge to Server bot.\n self.data_in(text=\"\", type=\"renamed\", oldname=oldname, newname=newname)",
"def logTestName(self):\n logging.info('%s', self.id())",
"def client_name_updated(query):\n return rename_hook(query, \"window_name_change\", \"client_name_updated\")",
"def Notify(self):\r\n\r\n self._owner.OnRenameTimer()",
"def OnRenameTimer(self):\r\n \r\n self.Edit(self._current)",
"def generic_list_change_handler(old_value, new_value, changed_field):\n\n removed_names = [x for x in old_value if x not in new_value and x != '']\n added_names = [x for x in new_value if x not in old_value and x != '']\n\n message = \"\"\n if len(added_names) > 0:\n message += \"Added to %s: %s. \" % (changed_field, unicode(', '.join(added_names)))\n if len(removed_names) > 0:\n message += \"Removed from %s: %s. \" % (changed_field, unicode(', '.join(removed_names)))\n\n return message",
"def _on_node_name_changed(self, oldname, newname):\n if newname in self._nodes and self._nodes[oldname] != self._nodes[newname]:\n raise Exception(\"New name collides with existing node.\")\n node = self._nodes[oldname]\n self._nodes[newname] = node\n del self.__nodes[oldname]\n self.node_name_changed.emit(oldname, newname)",
"def getChanges():",
"def _students_items_changed(self, event):\n if event.added:\n print \"students added (index,name):\", event.index, event.added\n if event.removed:\n print \"students removed (index,name):\", event.index, event.removed",
"def new_name(self,new_name):\n self.name = new_name",
"def lin_log_changed(self):\n self.model.notifyObservers()",
"def audit_filename(self):\n\n for commit in self.repository.commits.values():\n for filename in commit.files_changed:\n if commit.files_changed[ filename ][\"change\"] not in [\"A\",\"R\",\"C\"]:\n continue\n for restriction in self.filename_limits:\n if re.search(restriction, filename):\n self.__log_failure(commit.sha1, \"Invalid filename: \" + filename)",
"def player_changename(event_var):\r\n debug.write(\"[SourceRPG] Handling player_changename\", 1)\r\n players[event_var['userid']]['name'] = event_var['newname']\r\n debug.write(\"[SourceRPG] player_changename handled\", 1)",
"def pNameChanged(self):\n\t\t \n\t\tpn_widget = self.ui.findChild(QWidget, \"p_name\")\n\t\tpatient_name = pn_widget.toPlainText()\n\t\tprint patient_name\n\t\t\n\t\t# Make a database query to check if the current name exists\n\t\t# note: query with \"like\" so that similar names can be suggested\n\t\t\n\t\t# if patient can be found, updating following things:\n\t\t# - SSN field next to patient name\n\t\t# - name, age, etc.\n\t\t# - clearing nerve_info field (sinister&dexter) to correspond summary\n\t\t#\to set CCombobox to \"Summary\"\n\t\tdb_query = True\n\t\tif db_query:\n\t\t\t# Patient with the given name has been found, setting patient data to summary view \n\t\t\tnerve_combo_box = self.ui.findChild(QWidget, \"nerve_box\")\t\t\n\t\t\tnerve_combo_box.setCurrentIndex(0)\n\t\t\tself.nerveChanged()",
"def changed(self):\n\t\tpass",
"def update_name(self, new_name):\r\n self.__name = new_name",
"def update_name(self, new_name):\r\n self.__name = new_name",
"def names():\n pass",
"def command_output_updated(self, logger):"
] | [
"0.66539043",
"0.6639276",
"0.6399256",
"0.6380432",
"0.63485473",
"0.62940335",
"0.619992",
"0.61700296",
"0.61023694",
"0.60812205",
"0.6038383",
"0.59753203",
"0.59630597",
"0.5961217",
"0.59277236",
"0.5906765",
"0.5823073",
"0.5801369",
"0.57996845",
"0.5778308",
"0.57772136",
"0.5765396",
"0.5745386",
"0.5729459",
"0.57077724",
"0.5688974",
"0.5685505",
"0.5685505",
"0.5673849",
"0.56515306"
] | 0.8118659 | 0 |
findframe(startdir, camera, grating, filename) This routine will search for a filename. It will start in startdir, then it will descend into a directory named after the camera+grating. After that, it will search subdirectories of the form w\d+. If the file is still not found, the routine fails and returns None. | def findframe(startdir,camera,grating,filename):
grating = re.sub("\/","_",grating)
gdir = camera + grating
if os.path.isfile(os.path.join(startdir,filename)):
return(startdir)
else:
# now the grating directory
if os.path.isfile(os.path.join(startdir,gdir,filename)):
return(os.path.join(startdir,gdir))
poswavedirs = glob.glob(os.path.join(startdir,gdir) + '/w*')
wavedirs = []
for pwd in poswavedirs:
if re.search("w\d+",pwd):
wavedirs.append(pwd)
for wd in wavedirs:
# alright, now we search the subdirs
if os.path.isfile(os.path.join(wd,filename)):
return(wd)
return(None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findpaths(path):\n print('[INFO] Searching for .png images in ', path)\n frame_paths = []\n frame_to_path_dict = {}\n path_to_frame_dict = {}\n for root, dirs, files in os.walk(path, topdown=False):\n for name in files:\n if name.find('.png') != -1:\n frame_path = os.path.join(root, name)\n # NOTE: may want to change to deal with generic file names\n match = re.search(r'(?P<video_id>\\d+)_(?P<frame_id>\\d+).png', name)\n # video_id = int(match.group('video_id'))\n frame_id = int(match.group('frame_id'))\n frame_paths.append(frame_path)\n frame_to_path_dict[frame_id] = frame_path\n path_to_frame_dict[frame_path] = frame_id\n frame_paths_sorted = sorted(frame_paths, key=lambda x: int(path_to_frame_dict[x]))\n print('[INFO] %i frames located ' % (len(frame_paths)))\n return frame_paths_sorted, frame_to_path_dict, path_to_frame_dict",
"def dirCapture(parameter, path):\r\n # N should be argument of instances NO.\r\n N = parameter\r\n # set path for the captured frames\r\n cpath = path + '%d' % N + '/'\r\n # create directory if not exist\r\n while (os.path.exists(cpath)):\r\n # print('instance N%d' % N + ' exists')\r\n N = N + 1\r\n cpath = path + '%d' % N + '/'\r\n\r\n dir = os.path.dirname(cpath)\r\n # print('create folder'+cpath)\r\n os.makedirs(cpath)\r\n return N, cpath",
"def findGroove(targ):\n\n global grooveDB\n\n\n \"\"\" If no existing DB we load them from each dir in libpath. \n\n \"\"\"\n\n if not grooveDB:\n grooveDB=[]\n for dir in gbl.autoLib:\n g=loadDB(dir)\n if g:\n grooveDB.append([dir, g])\n\n if not grooveDB: # BS value so we don't keep trying to load\n grooveDB = [['', {}]]\n\n\n \"\"\" Search the dict for a match. \n\n grooveDir[] structure ... [ [dirname, g], [] ]\n\n g ... is a dict. Key = filename, data = list of grooves\n\n RETURN: Lib-Filename if found\n None if not found\n \"\"\"\n\n for dir, g in grooveDB:\n for filename, namelist in g.items():\n if targ in namelist:\n return os.path.join(dir,filename)\n\n return None",
"def get_frame_by_frame(name=None, fps=4, write_to_disk=False, display_feed=False, on_capture=None):\n\n reset_camera()\n\n if name is None:\n name = \"fbf_\" + str(int(time()))\n \n dname = None\n if write_to_disk:\n chdir(cwd)\n dname = join(dirname(realpath(sys.argv[0])), \"train\", \"data\", name)\n if not exists(dname):\n print(\"Created dir: %s\" % dname)\n mkdir(dname)\n else:\n print(\"Using dir: %s\" % dname)\n else:\n print('Not writing to disk')\n\n def _snap(name, dname, write, display, capture_callback):\n global camera\n s, img = camera.read()\n\n if s and capture_callback:\n img = capture_callback(img)\n\n if s and display:\n cv2.imshow(name, img)\n cv2.waitKey(1) \n\n if write:\n chdir(dname)\n number_of_files = len([item for item in os.listdir(dname) if os.path.isfile(os.path.join(dname, item))])\n path = \"./\" + str(number_of_files + 1) + \".png\"\n if s:\n imwrite(path, img)\n print(\"Saved to \" + dname + \"/\" + str(number_of_files + 1) + \".png\")\n else:\n print(\"Could not read image %d from camera\" % (number_of_files + 1))\n chdir(cwd)\n\n return Timer(1 / fps, _snap, name, dname, write_to_disk, display_feed, on_capture).use_mp()",
"def find_file_in_parent_dir(subdirectory, filename_pattern):\n current_dir = subdirectory\n while True:\n # See if the current directory contains the desired file:\n for name in os.listdir(current_dir):\n full_path = os.path.join(current_dir, name)\n matches_pattern = fnmatch.fnmatch(name, filename_pattern)\n if matches_pattern and os.path.isfile(full_path):\n return full_path\n # Get the next directory up:\n last_dir = current_dir\n current_dir = os.path.dirname(current_dir)\n # Check to see if we have reached the root directory:\n if last_dir == current_dir:\n return None",
"def FindFile(seeker):\n\n for folder in var.MOD_LOCATION:\n for file in os.listdir(folder):\n if file.lower() == seeker.lower():\n if not folder.endswith((\"/\", \"\\\\\")):\n folder = folder + \"\\\\\"\n return folder, file\n\n if True in [slash in seeker for slash in (\"/\", \"\\\\\")]:\n return GetFile(seeker) # Full path\n\n raise FileNotFoundError(seeker) # Exit out if the mod could not be found",
"def find_frame_files(self, ftype, calib_ID=None):\n return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID))",
"def find_file(self,start_dir=None,pattern=\"*\",file_extention=\"*.fif\",recursive=True,debug=False,abspath=False,\n ignore_case=False):\n pattern = self.update_pattern(pattern,ignore_case=ignore_case)\n if not isinstance(file_extention,(list)):\n s = file_extention\n file_extention = list()\n file_extention.append(s)\n \n if debug or self.debug:\n logging.debug(\"start dir : {}\\n\".format(start_dir) +\n \" -> glob pattern : {}\\n\".format(pattern) +\n \" -> file extention : {}\\n\".format(file_extention) +\n \" -> glob recursive : {}\\n\".format(recursive) +\n \" -> adding abs path: {}\\n\".format(abspath)\n )\n \n with self.working_directory(start_dir):\n for fext in file_extention:\n for f in glob.glob(pattern + fext,recursive=recursive): # ToDo fext re /\\.vhdr|vmrk|eeg$/\n if abspath:\n yield os.path.abspath(os.path.join(start_dir,f))\n else:\n yield f",
"def parse_directory(path, key_func=lambda x: x[-11:],\n rgb_prefix='img_',\n flow_x_prefix='flow_x_',\n flow_y_prefix='flow_y_',\n level=1):\n print('parse frames under folder {}'.format(path))\n if level == 1:\n frame_folders = glob.glob(os.path.join(path, '*'))\n elif level == 2:\n frame_folders = glob.glob(os.path.join(path, '*', '*'))\n else:\n raise ValueError('level can be only 1 or 2')\n\n def count_files(directory, prefix_list):\n lst = os.listdir(directory)\n cnt_list = [len(fnmatch.filter(lst, x+'*')) for x in prefix_list]\n return cnt_list\n\n # check RGB\n frame_dict = {}\n for i, f in enumerate(frame_folders):\n all_cnt = count_files(f, (rgb_prefix, flow_x_prefix, flow_y_prefix))\n k = key_func(f)\n\n x_cnt = all_cnt[1]\n y_cnt = all_cnt[2]\n if x_cnt != y_cnt:\n raise ValueError(\n 'x and y direction have different number '\n 'of flow images. video: ' + f)\n if i % 200 == 0:\n print('{} videos parsed'.format(i))\n\n frame_dict[k] = (f, all_cnt[0], x_cnt)\n\n print('frame folder analysis done')\n return frame_dict",
"def find_file_by_name(point,target):\n launcher_path = \"\"\n for (path, dir, files) in os.walk(point):\n for filename in files:\n # if target in filename:\n if filename == target:\n launcher_path = os.path.join(path, filename)\n return launcher_path",
"def open_frame(path,number):\n num=str(number).zfill(3) #Zero filling\n name = glob.glob(path+\"/*\"+num+\"*\")\n if len(name)==0:\n name = glob.glob(path+\"/\"+str(number)+\".png\")\n if len(name)>1:\n print \"too many matches \",len(name),\" found\"\n name = name[0]\n img = Image.open(name)\n img = np.asarray(img)\n img.setflags(write=1)\n return img",
"def findMayaFiles(directory):\n\n pass",
"def findAudioFile(self,kinfilename):\r\n progress = QtGui.QProgressDialog(\"Searching for Audio File...\",QtCore.QString(), 0,0,parent = self)\r\n progress.setMinimumDuration(250)\r\n\r\n (filename, _) = os.path.splitext(os.path.basename(kinfilename))\r\n pattern = filename.split('_BPC')[0] + '.wav'\r\n fullpath = os.path.dirname(kinfilename)\r\n toSearch = []\r\n audiofilename = None\r\n for i in self.find(fullpath,'/'):\r\n toSearch.append(fullpath[:i])\r\n toSearch.reverse()\r\n toSearch.append(fullpath)\r\n for loc in toSearch:\r\n for filenames in os.walk(loc):\r\n for filename in filenames:\r\n if pattern in filename:\r\n desiredFile = filename\r\n for files in desiredFile:\r\n if pattern == files:\r\n audiofilename = os.path.join(filenames[0],files)\r\n progress.destroy()\r\n return audiofilename\r\n if audiofilename is None:\r\n progress.destroy()\r\n return audiofilename",
"def get_dir_for_fname(directory, filename):\r\n for fname, dirpath in get_all_files(directory):\r\n if fname == filename:\r\n return dirpath\r\n return None",
"def recursively_find_file(folder, file_name):\n # TODO: print a hint when not founding file_name",
"def find(name):\n\n if os.path.exists(name):\n return name\n\n path = os.path.dirname(__file__) or '.'\n filename = os.path.abspath(os.path.join(path,name))\n if os.path.exists(filename):\n return filename\n\n for d in os.listdir(path):\n fullpath = os.path.abspath(os.path.join(path,d))\n if os.path.isdir(fullpath):\n filename = os.path.abspath(os.path.join(fullpath,name))\n if os.path.exists(filename):\n return filename\n return None",
"def gen_find(filepat, top):\n for path, dir_list, file_list in os.walk(top):\n for name in fnmatch.filter(file_list, filepat):\n yield os.path.join(path, name)",
"def find_video_of_file(\n self, video_dir: Union[str, os.PathLike], filename: str\n ) -> Union[str, os.PathLike]:\n try:\n all_files_in_video_folder = [\n f for f in next(os.walk(video_dir))[2] if not f[0] == \".\"\n ]\n except StopIteration:\n raise NoFilesFoundError(msg=f\"No files found in the {video_dir} directory\")\n all_files_in_video_folder = [\n os.path.join(video_dir, x) for x in all_files_in_video_folder\n ]\n return_path = None\n for file_path in all_files_in_video_folder:\n _, video_filename, ext = get_fn_ext(file_path)\n if (video_filename == filename) and (\n (ext.lower() == \".mp4\") or (ext.lower() == \".avi\")\n ):\n return_path = file_path\n\n if return_path is None:\n NoFileFoundWarning(\n f\"SimBA could not find a video file representing {filename} in the project video directory\"\n )\n return return_path",
"def find_project_directory(start=\".\", look_for=None):\n look_for = set(look_for or DEFAULT_PROJECT_INDICATORS)\n\n directory = path.path(start).abspath()\n\n while directory.parent != directory:\n items = os.listdir(directory)\n if any(i in look_for for i in items):\n return directory\n\n directory = directory.parent\n\n raise WatsonError('%s does not look like a project subdirectory' % start)",
"def extract_frames():\n vc = cv2.VideoCapture(INPUT_FILE)\n c=1\n\n if vc.isOpened():\n rval , frame = vc.read()\n else:\n rval, frame = False, False\n\n while rval:\n # cv2.imwrite((MODIFIED_FRAMES_DIR + 'img' + str(c) + '.jpg'),frame)\n cv2.imwrite((MODIFIED_FRAMES_DIR + str(c) + '.jpg'),frame)\n c = c + 1\n cv2.waitKey(1)\n rval, frame = vc.read()\n vc.release()\n print(\"All frames extracted successfully...\")",
"def search_file(filename, search_path, pathsep=os.pathsep):\n for path in string.split(search_path, pathsep):\n candidate = os.path.join(path, filename)\n if os.path.exists(candidate): return os.path.abspath(candidate)\n return None",
"def search_file(filename, search_path, pathsep=os.pathsep):\n for path in string.split(search_path, pathsep):\n candidate = os.path.join(path, filename)\n if os.path.exists(candidate): return os.path.abspath(candidate)\n return None",
"def locate(pattern, root=os.curdir):\n for path, dirs, files in os.walk(os.path.abspath(root)):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(path, filename)",
"def find_files(directory, pattern='**/*.wav'):\n return glob(os.path.join(directory, pattern), recursive=True)",
"def find_files(config, slot='*'):\n f_pattern = os.path.join(os.path.join(config['path'],config['led_name']), slot+'*' + config['led_name'] + '*'\n + config['current'] + '*' + config['exp_time'] + '*'\n + config['xpos'] + '*' + config['ypos'] + '*')\n print(f_pattern)\n return glob.glob(f_pattern)",
"def extract_frames_from_directory(count, source, destination):\n all_videos = os.listdir(source)\n print(all_videos)\n\n for video in all_videos:\n video_file = source + video # Retrieve a video from the OverHeadPress\n cap = cv2.VideoCapture(video_file) # capturing the video from the given path\n dim = (224, 224)\n\n while cap.isOpened():\n frame_id = cap.get(1) # current frame number\n ret, frame = cap.read()\n if not ret:\n break\n\n # We are capturing at 28 frames per second. \n # If we want to capture every 0.2 seconds we will take every 5 frames\n if frame_id % 8 == 0:\n filename =\"frame%d.jpg\" % count\n count+=1\n resized = cv2.resize(frame, dim)\n cv2.imwrite(destination + filename, resized)\n\n cap.release()\n print (\"Finished processing: \" + video + \". Ended at video: \" + str(count))",
"def locate_camera_frame(self, exp_id, left_cam_id, right_cam_id, frame_id):\n return Response(self.gen_locate_camera_frame(exp_id,\n left_cam_id, right_cam_id,\n frame_id),\n mimetype='multipart/x-mixed-replace; boundary=frame')",
"def find_file(pattern, base='.'):\n\n regex = re.compile(pattern)\n matches = []\n\n for root, dirs, files in os.walk(base):\n for f in files:\n if regex.match(f):\n matches.append(path.join(base, f))\n return matches",
"def videoFrames(filename, framerate=1):\n vid_file = os.path.join(os.path.dirname(os.getcwd()), \"Database\", \"Video\", filename)\n print(vid_file)\n assert os.path.isfile(vid_file), \"Given path is not a valid file\"\n tmpdir = os.path.join(os.getcwd(), \"tmp\")\n subprocess.run(\n [\n \"ffmpeg\",\n \"-i\",\n vid_file,\n \"-r\",\n f\"{framerate}\",\n os.path.join(tmpdir, \"img_%04d.jpg\"),\n ]\n )\n return [os.path.join(tmpdir, i) for i in os.listdir(tmpdir) if not i.endswith(\".wav\")]",
"def createCameraRig(mochaFbxFilePath=None):\n if mochaFbxFilePath == None:\n gI()\n else:\n eq(mochaFbxFilePath)\n return"
] | [
"0.5503912",
"0.54061455",
"0.54023004",
"0.5358403",
"0.5319264",
"0.531263",
"0.5276591",
"0.5222761",
"0.5186886",
"0.5123499",
"0.5107922",
"0.50754553",
"0.50358665",
"0.50259393",
"0.50139326",
"0.5009165",
"0.5004129",
"0.50000185",
"0.4983033",
"0.49472937",
"0.49471304",
"0.49471304",
"0.4935282",
"0.49323508",
"0.49272498",
"0.4916675",
"0.4904694",
"0.4894232",
"0.48799258",
"0.48773277"
] | 0.92224705 | 0 |
C function definition int rtp_smooth(float im, int nx, int ny, int nz, float dx, float dy, float dz, unsigned short mask, float fwhm) | def smooth(self, mri_data):
"""DEBUG
import matplotlib.pyplot as plt
self = rtp_smooth
"""
# image dimension
nx, ny, nz = mri_data.img_data.shape
if hasattr(mri_data.img_header, 'info'):
dx, dy, dz = np.abs(mri_data.img_header.info['DELTA'])
elif hasattr(mri_data.img_header, 'get_zooms'):
dx, dy, dz = mri_data.img_header.get_zooms()[:3]
else:
self.errmsg("No voxel size information in mri_data header")
# Copy function image data and get pointer
fim0 = mri_data.img_data
if fim0.dtype != np.float32:
fim_arr = fim0.astype(np.float32)
else:
fim_arr = fim0
fim_arr = np.moveaxis(np.moveaxis(fim_arr, 0, -1), 0, 1).copy()
fim_p = fim_arr.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
self.rtp_smooth(fim_p, nx, ny, nz, dx, dy, dz, self.mask_byte_p,
self.blur_fwhm)
fim_arr = np.moveaxis(np.moveaxis(fim_arr, 0, -1), 0, 1)
return fim_arr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def smoothImage(img):\n # Pillow uses RGB and cv2 uses GBR, so have to convert before and after smoothing\n imgBGR = cv2.cvtColor(np.asarray(img), cv2.COLOR_BGR2RGB)\n # smoothImgBGR = cv2.fastNlMeansDenoisingColored(imgBGR, None, 10,10,7,21)\n smoothImgBGR = cv2.bilateralFilter(imgBGR, 9, 75, 75)\n smoothImgRGB = cv2.cvtColor(smoothImgBGR, cv2.COLOR_BGR2RGB)\n return Image.fromarray(smoothImgRGB)",
"def apply_smoothstep(image):\n image_out = 3 * image**2 - 2 * image**3\n return image_out",
"def scipy_smooth(img, sigma=5):\n return ndimage.gaussian_filter(img, sigma=sigma)",
"def smooth_scipy(self, mri_data):\n\n # image dimension\n if hasattr(mri_data.img_header, 'info'):\n dx, dy, dz = np.abs(mri_data.img_header.info['DELTA'])\n elif hasattr(mri_data.img_header, 'get_zooms'):\n dx, dy, dz = mri_data.img_header.get_zooms()[:3]\n else:\n self.errmsg(\"No voxel size information in mri_data header\")\n\n # Set gaussian sigma in image dimension\n sigma = (self.blur_fwhm / np.array((dx, dy, dz))) / 2.354820\n imgdata = mri_data.img_data.astype(np.float64)\n\n # Apply mask\n if hasattr(self, 'maskV'):\n imgdata[~self.maskV] = 0\n\n # Apply Gaussian filter\n filt_img = gaussian_filter(imgdata, sigma, mode='constant')\n\n if hasattr(self, 'maskV'):\n # Adjust voxels with out of the mask (0) convolution\n aux_img = np.ones_like(imgdata)\n aux_img[~self.maskV] = 0\n filt_aux_img = gaussian_filter(aux_img, sigma, mode='constant')\n filt_img[self.maskV] /= filt_aux_img[self.maskV]\n\n return filt_img.astype(mri_data.img_data.dtype)",
"def smooth_with_function_and_mask(image, function, mask):\n bleed_over = function(mask.astype(float))\n masked_image = np.zeros(image.shape, image.dtype)\n masked_image[mask] = image[mask]\n smoothed_image = function(masked_image)\n output_image = smoothed_image / (bleed_over + np.finfo(float).eps)\n return output_image",
"def smooth_image(img_file=\"cy_double.png\"):\n \n oldimg, newimg, width, height, win = setup_image(img_file)\n\n for col in range(newimg.getWidth()):\n for row in range(newimg.getHeight()):\n p = newimg.getPixel(col, row)\n neighbors = []\n # Put the 8 surrounding pixels into neighbors\n for i in range(col-1, col+2):\n for j in range(row-1, row+2):\n try:\n neighbor = newimg.getPixel(i, j)\n neighbors.append(neighbor)\n except:\n continue\n nlen = len(neighbors)\n # Average out the RBG values\n if nlen:\n # Uncommented, the following line would leave most of the white \n # untouched which works a little better for real photographs, imo.\n #~ if nlen and p[0]+p[1]+p[2] < 690:\n p.red = sum([neighbors[i][0] for i in range(nlen)])/nlen\n p.green = sum([neighbors[i][1] for i in range(nlen)])/nlen\n p.blue = sum([neighbors[i][2] for i in range(nlen)])/nlen\n newimg.setPixel(col,row,p)\n\n write_image(img_file, newimg, win, \"_smooth\")",
"def smoothing(data, mask):\n smooth_data = gaussian_filter(data, [2, 2, 2, 0])\n\n Y = smooth_data[mask].T\n\n return Y",
"def smooth(image):\n image = convolve(image, gaussian2d(), mode='same')\n return image",
"def smooth_image(self, image, mask):\n \n filter_size = self.smoothing_filter_size.value\n if filter_size == 0:\n return image\n sigma = filter_size / 2.35\n #\n # We not only want to smooth using a Gaussian, but we want to limit\n # the spread of the smoothing to 2 SD, partly to make things happen\n # locally, partly to make things run faster, partly to try to match\n # the Matlab behavior.\n #\n filter_size = max(int(float(filter_size) / 2.0),1)\n f = (1/np.sqrt(2.0 * np.pi ) / sigma * \n np.exp(-0.5 * np.arange(-filter_size, filter_size+1)**2 / \n sigma ** 2))\n def fgaussian(image):\n output = scipy.ndimage.convolve1d(image, f,\n axis = 0,\n mode='constant')\n return scipy.ndimage.convolve1d(output, f,\n axis = 1,\n mode='constant')\n #\n # Use the trick where you similarly convolve an array of ones to find \n # out the edge effects, then divide to correct the edge effects\n #\n edge_array = fgaussian(mask.astype(float))\n masked_image = image.copy()\n masked_image[~mask] = 0\n smoothed_image = fgaussian(masked_image)\n masked_image[mask] = smoothed_image[mask] / edge_array[mask]\n return masked_image",
"def test_spatial_smoothing_xesmf(hindcast_recon_3d):\r\n he = hindcast_recon_3d\r\n he_bil = he.smooth(\"goddard\", method=\"bilinear\")\r\n he_patch = he.smooth(\"goddard\", method=\"patch\")\r\n assert he_bil.get_initialized().mean() != he_patch.get_initialized().mean()",
"def smooth(im, n=15):\n g = gaussKern(n)\n improc = signal.convolve2d(im, g, mode='same', boundary='symm')\n return improc",
"def smooth(dest, f):\n if f.is_Constant:\n # Return a scaled version of the input if it's a Constant\n dest.data[:] = .9 * f.data\n else:\n Operator(Eq(dest, f.avg(dims=f.dimensions[-1])), name='smoother').apply()",
"def smooth(img, sigma):\n if sigma < 0:\n raise ValueError('smoothing kernel size is negative')\n elif sigma == 0:\n return img.get_data()\n else:\n sigma_vox = sigma / np.sqrt(np.sum(img.get_affine()[0:3, 0:3] ** 2, 0))\n return nd.gaussian_filter(img.get_data(), sigma_vox)",
"def smooth_spectra(xarr, farr, sigma=3, nkern=20):\n xkern = np.arange(nkern)\n kern = np.exp(-(xkern - 0.5 * nkern) ** 2 / (sigma) ** 2)\n\n return gaussian_filter1d(farr, sigma)",
"def spectral_smooth(imagename, n_hanning=9):\n log_post(':: Smoothing spectral axis')\n infile = imagename + '.common'\n outfile = imagename + '.specsmooth'\n tmpfile = outfile + '.tmp'\n if_exists_remove(outfile)\n if_exists_remove(tmpfile)\n shutil.copytree(infile, tmpfile)\n for ii in range(n_hanning):\n log_post('-- Hanning smooth iteration: {0}'.format(ii))\n specsmooth(\n imagename=tmpfile,\n outfile=outfile,\n function='hanning',\n dmethod='', # no decimation\n )\n rmtables(tmpfile)\n os.rename(outfile, tmpfile)\n os.rename(tmpfile, outfile)",
"def apply_mask(niimgs, mask_img, dtype=np.float32,\n smooth=None, ensure_finite=True):\n mask = utils.check_niimg(mask_img)\n mask = mask_img.get_data().astype(np.bool)\n\n niimgs = utils.check_niimgs(niimgs)\n series = niimgs.get_data()\n affine = niimgs.get_affine()\n if ensure_finite:\n # SPM tends to put NaNs in the data outside the brain\n series[np.logical_not(np.isfinite(series))] = 0\n series = series.astype(dtype)\n affine = affine[:3, :3]\n # del data\n if isinstance(series, np.memmap):\n series = np.asarray(series).copy()\n if smooth is not None:\n # Convert from a sigma to a FWHM:\n # Do not use /=, smooth may be a numpy scalar\n smooth = smooth / np.sqrt(8 * np.log(2))\n vox_size = np.sqrt(np.sum(affine ** 2, axis=0))\n smooth_sigma = smooth / vox_size\n for this_volume in np.rollaxis(series, -1):\n this_volume[...] = ndimage.gaussian_filter(this_volume,\n smooth_sigma)\n return series[mask].T",
"def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))",
"def preprocessing(image, smooth_size, folder):\n from skimage.restoration import denoise_tv_chambolle\n \n dim = int(image.shape[0] / 50.)\n smoothed = rank.median(image, disk(smooth_size))\n #smoothed = denoise_tv_chambolle(image, weight=0.002)\n smoothed = rank.enhance_contrast(smoothed, disk(smooth_size))\n \n pl.subplot(2, 3, 1)\n pl.title(\"after median\")\n pl.imshow(smoothed)\n pl.gray()\n # If after smoothing the \"dot\" disappears\n # use the image value\n \n # TODO: wat do with thresh?\n try:\n im_max = smoothed.max()\n thresh = threshold_otsu(image)\n except:\n im_max = image.max()\n thresh = threshold_otsu(image)\n\n \n if im_max < thresh:\n labeled = np.zeros(smoothed.shape, dtype=np.int32)\n \n else:\n binary = smoothed > thresh\n \n # TODO: this array size is the fault of errors\n bin_open = binary_opening(binary, np.ones((dim, dim)), iterations=5)\n bin_close = binary_closing(bin_open, np.ones((5,5)), iterations=5)\n \n pl.subplot(2, 3, 2)\n pl.title(\"threshold\")\n pl.imshow(binary, interpolation='nearest')\n pl.subplot(2, 3, 3)\n pl.title(\"opening\")\n pl.imshow(bin_open, interpolation='nearest')\n pl.subplot(2, 3, 4)\n pl.title(\"closing\")\n pl.imshow(bin_close, interpolation='nearest')\n \n distance = ndimage.distance_transform_edt(bin_open)\n local_maxi = peak_local_max(distance,\n indices=False, labels=bin_open)\n \n markers = ndimage.label(local_maxi)[0]\n \n labeled = watershed(-distance, markers, mask=bin_open)\n pl.subplot(2, 3, 5)\n pl.title(\"label\")\n pl.imshow(labeled)\n #pl.show()\n pl.savefig(folder)\n pl.close('all')\n\n #misc.imsave(folder, labeled)\n# labels_rw = random_walker(bin_close, markers, mode='cg_mg')\n# \n# pl.imshow(labels_rw, interpolation='nearest')\n# pl.show()\n\n return labeled",
"def rms_smooth(self, i, sp_mw):\n mw = int(sp_mw*self.s_freq) # convert moving window size from seconds to samples\n \n # convolve for rolling RMS\n datsq = np.power(self.spfiltEEG[i], 2)\n window = np.ones(mw)/float(mw)\n # convolution mode 'valid' will remove edge effects, but also introduce a time shift\n # and downstream erors because it changes the length of the rms data\n rms = np.sqrt(np.convolve(datsq, window, 'same')) \n #spinfilt_RMS = pd.DataFrame(rms, index=self.data.index) --> add this back for > speed\n self.spRMS[i] = rms # for > speed, don't store spinfilt_RMS[i] as an attribute\n \n # smooth with moving average\n rms_avg = self.spRMS[i].rolling(mw, center=True).mean()\n self.spRMSmavg[i] = rms_avg",
"def do_smooth(d, WT, sample_rate):\n d_smooth = np.zeros(len(d))\n Wt = int(np.ceil(sample_rate*WT))\n for i in range(len(d)-Wt):\n d_smooth[i] = np.mean(d[i: i+Wt])\n d_smooth[0:Wt+100] = np.nan # +100 removes \"edge effects\" at start of f4\n return(d_smooth)",
"def gaussbroad(w, s, hwhm):\n \"\"\"\n History\n --------\n Dec-90 GB,GM\n Rewrote with fourier convolution algorithm.\n Jul-91 AL\n Translated from ANA to IDL.\n 22-Sep-91 JAV\n Relaxed constant dispersion check# vectorized, 50% faster.\n 05-Jul-92 JAV\n Converted to function, handle nonpositive hwhm.\n Oct-18 AW\n Python version\n \"\"\"\n\n # Warn user if hwhm is negative.\n if hwhm < 0:\n logger.warning(\"Forcing negative smoothing width to zero.\")\n\n # Return input argument if half-width is nonpositive.\n if hwhm <= 0:\n return s # true: no broadening\n\n # Calculate (uniform) dispersion.\n nw = len(w) ## points in spectrum\n wrange = w[-1] - w[0]\n dw = wrange / (nw - 1) # wavelength change per pixel\n\n # Make smoothing gaussian; extend to 4 sigma.\n # 4.0 / sqrt(2.0 * alog(2.0)) = 3.3972872\n # sqrt(alog(2.0)) = 0.83255461\n # sqrt(alog(2.0) / pi) = 0.46971864\n # (*1.0000632 to correct for >4 sigma wings)\n if hwhm >= 5 * wrange:\n return np.full(nw, np.sum(s) / nw)\n ## points in half gaussian\n nhalf = int(3.3972872 * hwhm / dw)\n ## points in gaussian (odd!)\n ng = 2 * nhalf + 1\n # wavelength scale of gaussian\n wg = dw * (np.arange(ng, dtype=float) - (ng - 1) / 2)\n # convenient absisca\n xg = (0.83255461 / hwhm) * wg\n # unit area gaussian w / FWHM\n gpro = (0.46974832 * dw / hwhm) * np.exp(-xg * xg)\n gpro = gpro / np.sum(gpro)\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, gpro, mode=\"nearest\")\n\n return sout",
"def smooth(o_l, o_r, c_l, c_r, AMT):\n l = o_l * AMT + (1-AMT) * c_l\n r = o_r * AMT + (1-AMT) * c_r\n return (l, r)",
"def smooth(x, sam=10, type='gaussian'):\n if type is 'gaussian':\n win = sig.gaussian(sam, sam*.3) #the std is a shot in the dark \n elif type is 'flat':\n win = np.ones(sam)\n else:\n raise ValueError, \"type name not recognized\"\n scape = np.r_[2*x[0]-x[sam:1:-1],x,2*x[-1]-x[-1:-sam:-1]] \n smoothed = sig.convolve(np.divide(win, win.sum()), scape, mode='same')\n return smoothed[sam-1:-(sam-1)]",
"def fingauss_smoothing(engine, smoothed, R, d_k):\n code = CodeSegment(engine)\n def tf(k):\n k2 = sum(((2*kny/numpy.pi)*numpy.sin(ki*numpy.pi/(2*kny)))**2 for ki in k)\n wts = numpy.exp(-0.5*k2* R**2)\n return wts\n \n kny = numpy.pi*engine.pm.Nmesh[0]/engine.pm.BoxSize[0]\n code.assign(x='d_k', y='tmp')\n code.transfer(complex='tmp', tf=tf)\n code.c2r(real=smoothed, complex='tmp')\n return code",
"def smooth_nii(nii_file,\n out_file,\n smooth_filter='sinc',\n smooth_iter=40,\n relaxation=0.2):\n img = sitk.ReadImage(str(nii_file))\n volume = sitk.GetArrayFromImage(img)\n \n result = smooth_img(volume,\n smooth_filter='sinc',\n smooth_iter=40,\n relaxation=0.2)\n\n img_result = sitk.GetImageFromArray(result)\n img_result.SetDirection(img.GetDirection())\n img_result.SetOrigin(img.GetOrigin())\n img_result.SetSpacing(img.GetSpacing())\n\n sitk.WriteImage(img_result, str(out_file))",
"def make_glow_model(im_in, bin_x=1, bin_y=1):\n im=im_in.copy()\n im[0]=im[2]\n im[1]=im[2]\n im[-1]=im[-2]\n \n #glow image\n glow=np.zeros_like(im)\n \n #meshgrid\n x, y = np.meshgrid(np.arange(im.shape[1]), np.arange(im.shape[0]))\n \n \n def model_corner(im, x0, y0, xw, yw, iparams, std_clip=0):\n \"\"\" std_clip is the y height of the small corner to use to exclude\n spectra in the large corner,\n \n (iparams=(glow amp, x center, y center, xwid, ywid, xy amount)\n \n positions and initial params adjusted automatically for binning\n pass coordinates in 4k positions\n \"\"\"\n x0/=bin_x\n y0/=bin_y\n xw/=bin_x\n yw/=bin_y\n iparams=list(iparams)\n iparams[1]/=bin_x\n iparams[2]/=bin_y\n iparams[3]/=bin_x\n iparams[4]/=bin_y\n \n corner=im[y0:y0+yw,x0:x0+xw].copy()\n if std_clip:\n small_corner=im[y0:y0+std_clip,x0:x0+xw].copy()\n patch_locs=corner>2*small_corner.std()\n patch_locs[:y0+std_clip,:]=False\n corner[patch_locs]=np.median(small_corner)\n cim, param= gaussfit2D(corner, iparams)\n param=list(param)\n param[-1]=0\n param[1]+=x0\n param[2]+=y0\n return gauss2D(( x,y), *param)\n \n #Lower R\n try:\n tmp=model_corner(im, 3996, 2, 100, 100,\n (150, 58, -7, 30.0, 20.0, 0, 0))\n if tmp.min() < 0:\n raise RuntimeError('Glow model has negative values')\n else:\n glow+=tmp\n\n except RuntimeError, e:\n print 'Lower R glow model failed: {}'.format(str(e))\n\n #Lower L\n try:\n tmp=model_corner(im, 0, 2, 100, 100,\n (150, 40, 0, 30.0, 20.0, 0, 0),\n std_clip=50)\n if tmp.min() < 0:\n raise RuntimeError('Glow model has negative values')\n else:\n glow+=tmp\n\n except RuntimeError, e:\n print 'Lower L glow model failed: {}'.format(str(e))\n \n\n #Upper L\n try:\n tmp=model_corner(im, 0, 4012, 100, 100,\n (150, 40, 100, 30.0, 20.0, 0, 0))\n if tmp.min() < 0:\n raise RuntimeError('Glow model has negative values')\n else:\n glow+=tmp\n\n except RuntimeError, e:\n print 'Upper L glow model failed: {}'.format(str(e))\n\n #Upper R\n try:\n tmp=model_corner(im, 3996, 4000, 100, 100,\n (150, 58, 100, 30.0, 20.0, 0, 0))\n if tmp.min() < 0:\n raise RuntimeError('Glow model has negative values')\n else:\n glow+=tmp\n\n except RuntimeError, e:\n print 'Upper R glow model failed: {}'.format(str(e))\n \n return glow",
"def smooth_gray_image(raw_img):\n return cv2.blur(raw_img, (5, 5))",
"def smoothfield( d , boxsize, Rth ):\n\n ng = N.shape(d)[0]\n dk = N.fft.rfftn(d)\n kgrid = getkgrid(boxsize,ng)\n dk = dk * N.exp( -(kgrid * Rth) ** 2. / 2. )\n d = N.fft.irfftn(dk)\n\n return d",
"def temporal_smooth(s, sample_rate, tau, hwinlen=20):\n\n t = np.arange(-hwinlen, hwinlen+1) / sample_rate\n w = np.exp(-t**2 / tau)\n w /= w.sum()\n return convolve1d(s, w)",
"def apply_smoothing(image, kernel_size=3):\n return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)"
] | [
"0.6486485",
"0.6467457",
"0.6421818",
"0.62798643",
"0.62666625",
"0.607366",
"0.6062642",
"0.6041981",
"0.5998901",
"0.59359425",
"0.5898149",
"0.5896014",
"0.5868493",
"0.5863498",
"0.5743527",
"0.57388455",
"0.57168955",
"0.57009465",
"0.5594682",
"0.55919045",
"0.5564512",
"0.55632323",
"0.55614996",
"0.55404025",
"0.5505964",
"0.5505818",
"0.54870385",
"0.5484477",
"0.54737145",
"0.5464035"
] | 0.7523696 | 0 |
Get the config value with the label. | def get(self, label):
if label in self.config[self.env]:
return self.config[self.env][label]
else:
logging.warning(f'Config Mgr->get(): label: {label} not configured')
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getValue(self, label, default=None):\n # Allow getValue using the label string\n if isinstance(label, basestring):\n label = str2Label(label)\n return self._labelDict.get(label, default)",
"def get_config_value(self, name):\r\n if name in self.config_values:\r\n return self.config_values[name]",
"def get_field_value(self, label):\n input_element_id = self.selenium.get_element_attribute(\n \"xpath://label[contains(., '{}')]\".format(label), \"for\"\n )\n value = self.selenium.get_value(input_element_id)\n return value",
"def _get_config_value(self, section, key):\n return config.get(section, key)",
"def get_value_label(self, value):\n return self.label_config.get_index_label(value)",
"def getvalue(self,num,name):\n return self.M.conf(num)[name]",
"def value(self) -> str:\n return self._config.get('value')",
"def getValue(self, valueName):\n\t\treturn self.settings[valueName][0]",
"def config_value(name):\n def get():\n try:\n return config.get('yourls', name)\n except (NoOptionError, NoSectionError):\n return None\n return get",
"def get_config_value(keyword):\n if g_configs and keyword in g_configs:\n return g_configs[keyword]\n return \"\"",
"def get_configval(self, keyname, defaultval=None):\n return self.cfghelper.get_value(keyname,defaultval)",
"def _GetConfigValue(self, config_parser, section_name, value_name):\n try:\n return config_parser.get(section_name, value_name).decode('utf-8')\n except configparser.NoOptionError:\n return",
"def _GetConfigValue(self, config_parser, section_name, value_name):\n try:\n return config_parser.get(section_name, value_name).decode('utf-8')\n except (configparser.NoOptionError, configparser.NoSectionError):\n return",
"def get_label_config(self):\n return self.label_config_center",
"def get_config(self, name):\n return self.configs[name][0]",
"def get_system_value(name: str):\n return Config.objects.first().__dict__[name]",
"def get(self, key):\n return self.labels[key]",
"def get_value(val_name, default=None):\r\n configuration = get_configuration()\r\n return configuration.get(val_name, default)",
"def get_value(self, key):\n if key not in self._config:\n raise ValueError(\"%s not in self.config\"%key)\n return self._config[key][\"value\"]",
"def parse_value(self, value_name, default=None):\n\t\treturn self.cfg_root.find(value_name).text",
"def _get_label(self):\n return self.label",
"def get_label(settings):",
"def GetValue(self):\n \n return self.choices[self.selected].GetLabel()",
"def setting(self, config, name, default=None):\n\n return config.get(name, default) if config else default",
"def get_label(self, key):\n return self.labels.get(key, None)",
"def get(self, key):\n return self.config.get(key)",
"def __getitem__(self, name):\n return self.config[name]",
"def get_label(self):\n return self.job[self.label_key]",
"def value(self):\n\n memcached_items = memcache_services.get_multi([self.name])\n if self.name in memcached_items:\n return memcached_items[self.name]\n\n datastore_item = config_models.ConfigPropertyModel.get(\n self.name, strict=False)\n if datastore_item is not None:\n memcache_services.set_multi({\n datastore_item.id: datastore_item.value})\n return datastore_item.value\n\n return self.default_value",
"def get_config_var(name):\n return get_config_vars().get(name)"
] | [
"0.76343673",
"0.7445326",
"0.6866368",
"0.6811643",
"0.6778108",
"0.6752221",
"0.6746077",
"0.6634147",
"0.6633436",
"0.66317177",
"0.6629522",
"0.66281676",
"0.66143626",
"0.6585026",
"0.64996743",
"0.64693654",
"0.6405412",
"0.63843614",
"0.6352854",
"0.6325311",
"0.62857497",
"0.62741613",
"0.6269089",
"0.62332284",
"0.6224804",
"0.62235326",
"0.6179844",
"0.6179091",
"0.61773235",
"0.61749315"
] | 0.78867954 | 0 |
Return a fully-formatted connect string for psycopg2.connect using the config parameters. | def get_db_connect_string(self):
template_string = "host={} dbname={} user={} password={}"
return template_string.format(self.get("DB_HOST"),
self.get("DB_NAME"),
self.get("DB_USER"),
self.get("DB_PASSWORD")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_connection_string(self):\n auth = ''\n if self.user:\n auth = self.user\n if self.password:\n auth = auth + ':' + self.password\n if auth:\n auth += '@'\n conn_string = 'postgresql://{auth}{host}/{dbname}'.format(\n auth=auth, host=self.host, dbname=self.dbname)\n\n return conn_string",
"def get_connection_string(self):\n auth = ''\n if self._user:\n auth = self._user\n if self._password:\n auth = auth + ':' + self._password\n if auth:\n auth += '@'\n conn_string = 'postgresql://{auth}{host}/{dbname}'.format(\n auth=auth, host=self._hostname, dbname=self._dbname)\n\n return conn_string",
"def connection_string(self) -> str:\n if self.dialect == \"sqlite\":\n ret_connection_string = f\"{self.dialect}:///{self.database}\"\n else:\n escaped_password: str = urllib.parse.quote_plus(self.password)\n auth_section: str = f\"{self.username}:{escaped_password}\"\n address: str = f\"{self.host}:{self.port}\"\n ret_connection_string = f\"{self.dialect}://{auth_section}@{address}/{self.database}\"\n\n return ret_connection_string",
"def _get_connect_string(backend,\n user=\"openstack_citest\",\n passwd=\"openstack_citest\",\n database=\"openstack_citest\"):\n if backend == \"mysql\":\n backend = \"mysql+mysqldb\"\n elif backend == \"postgres\":\n backend = \"postgresql+psycopg2\"\n\n return (\"%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s\"\n % {'backend': backend, 'user': user, 'passwd': passwd,\n 'database': database})",
"def create_db_conn_string(db_config):\n db_config = {\n 'host': environ['DB_HOST'] if 'DB_HOST' in environ else db_config['Host'],\n 'name': environ['DB_NAME'] if 'DB_NAME' in environ else db_config['Name'],\n 'username': environ['DB_USERNAME'] if 'DB_USERNAME' in environ \\\n else db_config['User'],\n 'password': environ['DB_PASSWORD'] if 'DB_PASSWORD' in environ \\\n else db_config['Password']\n }\n\n return f'host={db_config[\"host\"]} user={db_config[\"username\"]} ' \\\n f'password={db_config[\"password\"]} dbname={db_config[\"name\"]}'",
"def connection_string(self):\n return \"postgresql+psycopg2://%s:%s@%s:%s/%s\" % \\\n (os.environ['DB_USER'],\n os.environ['DB_PASSWORD'],\n os.environ['DB_HOST'],\n os.environ['DB_PORT'],\n self.database_name)",
"def _get_connection_string(self, database):\r\n return ('ODBC;DNS=;DRIVER={Client Access ODBC Driver (32-bit)};'\r\n 'SYSTEM=%s;CMT=0;DBQ=%s;NAM=%d;DFT=5;DSP=1;TFT=0;TSP=0;DEC=0;'\r\n 'XDYNAMIC=0;UID=%s;PWD=%s;RECBLOCK=2;BLOCKSIZE=512;SCROLLABLE=0;'\r\n 'TRANSLATE=0;LAZYCLOSE=0;LIBVIEW=0;REMARKS=0;CONNTYPE=0;SORTTYPE=2;'\r\n 'SORTWEIGHT=1;LANGUAGEID=ENG;PREFETCH=0;DFTPKGLIB=' \r\n % (self._sys_name, database, self._delimiters[self._sql_delimiter], self._user, self._pwd))",
"def get_connection_string():\n connection_string = 'postgresql://' + config.GM_DB_USER + \":\" + config.GM_DB_PASSWORD + \"@\" \\\n + config.GM_DB_HOST + \":\" + config.GM_DB_PORT + \"/\" + config.GM_DB_NAME\n return connection_string",
"def get_db_landing_connect_string(self):\n template_string = \"host={} dbname={} user={} password={}\"\n return template_string.format(self.get(\"DB_HOST\"),\n self.get(\"DB_LANDING_NAME\"),\n self.get(\"DB_LANDING_USER\"),\n self.get(\"DB_LANDING_PASSWORD\"))",
"def posgresql_dsn_formatter(credentials):\n\n return \"postgresql://{0}:{1}@{2}:{3}/{4}\".format(credentials[\"username\"],\n credentials[\"password\"],\n credentials[\"host\"],\n credentials[\"port\"],\n credentials[\"path\"])",
"def get_pg_str(host, port, user, dbname, password):\n return 'PG:host={} port={} user={} dbname={} password={}'.format(\n host, port, user, dbname, password\n )",
"def buildConnectionString(params):\n print params\n return \";\".join([\"%s=%s\" % (a, b) for a, b in params.items()])",
"def connect():\r\n params = config()\r\n print('Connecting to the PostgreSQL database...')\r\n global conn\r\n conn = psycopg2.connect(**params)",
"def get_db_connection_string(configpath):\n with open(configpath, 'r') as f:\n config = json.load(f)\n # read keys from json config\n DB_NAME = config['db']\n USER = config['user']\n PASSWORD = config['password']\n # return connection string\n return 'dbname={0} user={1} password={2}'.format(DB_NAME, USER, PASSWORD)",
"def return_connection(db_key):\n\n config = load_config()\n\n db = config[db_key][\"database_name\"]\n user = config[db_key][\"database_user\"]\n host = \"localhost\"\n password = config[db_key][\"database_pass\"]\n\n return \"\"\"dbname=%s user=%s password=%s host=%s\"\"\" % (db, user, password,\n host)",
"def get_db_conn(db_config):\n return psycopg2.connect(\n \"dbname='{}' user='{}' host='{}'\".format(\n db_config[\"name\"],\n db_config[\"user\"],\n db_config[\"host\"]\n )\n )",
"def connection_string(db_type, database, host='localhost', port=None, username=None, password=None):\n if db_type == 'mysql':\n if port:\n return f\"mysql://{username}:{password}@{host}:port/{database}\"\n else:\n return f\"mysql://{username}:{password}@{host}/{database}\"\n\n if db_type == 'sqlite':\n return f'sqlite:///{database}'",
"def buildConnectionString(params):\n return \";\".join([\"%s=%s\" % (k,v) for k, v in params.items()])",
"def info(indent=''):\n connect_string = config.get_database()\n if connect_string.startswith('sqlite:'):\n return indent + 'sqlite3 @ ' + os.path.abspath(connect_string[7:])\n else:\n raise ValueError('invalid connect string \\'{}\\''.format(connect_string))",
"def ogrString(self):\r\n return 'host={} user={} port={} dbname={} password={}'.format(self.host, self.user, self.port, self.database, self.password)",
"def connect():\n cf = configparser.ConfigParser()\n cf.read(\"config.ini\")\n try:\n connection = psycopg2.connect(user=cf.get('postgres', 'user'),\n password=cf.get('postgres', 'password'),\n host=cf.get('postgres', 'host'),\n port=cf.get('postgres', 'port'),\n database=cf.get('postgres', 'database')\n )\n return connection\n except (Exception, psycopg2.Error) as error:\n print(\"Error while connecting to PostgresSQL\", error)",
"def get_sql_conn():\r\n\r\n # get config information\r\n config = configparser.ConfigParser()\r\n config.sections()\r\n config.read('../config.ini')\r\n dbname = config['PostgresDB']['db_name']\r\n host = config['PostgresDB']['host']\r\n port = config['PostgresDB']['port']\r\n user = config['PostgresDB']['user']\r\n pw = config['PostgresDB']['pw']\r\n\r\n # connect to the database\r\n conn = psycopg2.connect(host=host, port=port, database=dbname,\r\n user=user, password=pw)\r\n return conn",
"def get_connection_string(src_or_dest):\n if src_or_dest == \"src\":\n try:\n SQL_SERVER = os.environ[\"CROP_SRC_SQL_SERVER\"]\n SQL_PASSWORD = os.environ[\"CROP_SRC_SQL_PASS\"]\n except:\n print(\n \"Need to set environment variables CROP_SRC_SQL_SERVER, CROP_SRC_SQL_PASS\"\n )\n return None\n elif src_or_dest == \"dest\":\n try:\n SQL_SERVER = os.environ[\"CROP_DEST_SQL_SERVER\"]\n SQL_PASSWORD = os.environ[\"CROP_DEST_SQL_PASS\"]\n except:\n print(\n \"Need to set environment variables CROP_DEST_SQL_SERVER, CROP_DEST_SQL_PASS\"\n )\n return None\n else:\n print(\"Error: need to specify 'src' or 'dest'\")\n SQL_USERNAME = os.environ[\"CROP_SQL_USERNAME\"]\n SQL_USER = f\"{SQL_USERNAME}@{SQL_SERVER}\"\n SQL_HOST = f\"{SQL_SERVER}.postgres.database.azure.com\"\n SQL_CONNECTION_STRING = \"%s://%s:%s@%s:%s\" % (\n SQL_ENGINE,\n SQL_USER,\n parse.quote(SQL_PASSWORD),\n SQL_HOST,\n SQL_PORT,\n )\n return SQL_CONNECTION_STRING",
"def buildConnectionStringx(**paramx):\n return \";\".join([\"%s=%s\" % (k, v) for k, v in paramx.items()])",
"def createConnection(self):\r\n conn_string = \"host='{}' dbname='{}' user='{}' password='{}' port={}\".format(\r\n self.host, self.database, self.user, self.password, self.port)\r\n return psycopg2.connect(conn_string)",
"def conn_config_name(conn_config):\n\n n_conn_config = [cname for cname, on in conn_config.items() if on]\n n_conn_config = ' + '.join(n_conn_config)\n return n_conn_config",
"def connect(db_config):\n conn = None\n try:\n print('Connecting to db...')\n url = \"dbname={} user={} password={} host={} port={}\".\\\n format(db_config['dbname'], db_config['user'],\n db_config['password'], db_config['host'],\n db_config['port'])\n conn = psycopg2.connect(url)\n except(Exception, psycopg2.DatabaseError) as error:\n print(\"Warning: Connection Error\", error)\n finally:\n if conn is not None:\n print(\"Connected to db\")\n db_connection['connection'] = conn\n return conn",
"def connect(con_str):\r\n try:\r\n connection = psycopg2.connect(**con_str)\r\n return connection\r\n except Exception as conn_err:\r\n print(conn_err)\r\n print('Unable to connect to database. Aborting')",
"def build_db_uri() -> str:\n if DatabaseConfig.uri:\n return DatabaseConfig.uri\n\n return (\n f\"postgresql://{DatabaseConfig.username}:{DatabaseConfig.password}\"\n f\"@{DatabaseConfig.host}:{DatabaseConfig.port}/{DatabaseConfig.database}\"\n )",
"def connect_to_db(config: Config):\n logger.info(f'resolving db host from {config.db_host} DNS: {config.flyio_dns}')\n db_host_ip = resolve_db_host(config)\n logger.info(f'resolved db host ip: {db_host_ip}')\n\n return psycopg2.connect(\n # NOTE: for demo, we use database template1\n (f\"dbname='template1' \"\n f\"user='{config.db_user}' \"\n f\"host='{db_host_ip}' \"\n f\"password='{config.db_password}'\")\n )"
] | [
"0.714022",
"0.71283305",
"0.69476783",
"0.6788406",
"0.6728916",
"0.67060965",
"0.6638018",
"0.6636255",
"0.6568879",
"0.6510854",
"0.645065",
"0.6360388",
"0.62624675",
"0.6222213",
"0.61551327",
"0.61442584",
"0.6135438",
"0.6134094",
"0.6118196",
"0.60870296",
"0.6026554",
"0.59844744",
"0.5972208",
"0.5935692",
"0.5899324",
"0.5873847",
"0.58668953",
"0.5854368",
"0.58145106",
"0.58017623"
] | 0.7425035 | 0 |
Returns a formatted connect string to connect to the "landing" DB (which is the preexisting "studentdb") in order to be able to drop and create the "sparkifydb". | def get_db_landing_connect_string(self):
template_string = "host={} dbname={} user={} password={}"
return template_string.format(self.get("DB_HOST"),
self.get("DB_LANDING_NAME"),
self.get("DB_LANDING_USER"),
self.get("DB_LANDING_PASSWORD")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_db_connect_string(self):\n template_string = \"host={} dbname={} user={} password={}\"\n return template_string.format(self.get(\"DB_HOST\"),\n self.get(\"DB_NAME\"),\n self.get(\"DB_USER\"),\n self.get(\"DB_PASSWORD\"))",
"def info(indent=''):\n connect_string = config.get_database()\n if connect_string.startswith('sqlite:'):\n return indent + 'sqlite3 @ ' + os.path.abspath(connect_string[7:])\n else:\n raise ValueError('invalid connect string \\'{}\\''.format(connect_string))",
"def connection_string(self) -> str:\n if self.dialect == \"sqlite\":\n ret_connection_string = f\"{self.dialect}:///{self.database}\"\n else:\n escaped_password: str = urllib.parse.quote_plus(self.password)\n auth_section: str = f\"{self.username}:{escaped_password}\"\n address: str = f\"{self.host}:{self.port}\"\n ret_connection_string = f\"{self.dialect}://{auth_section}@{address}/{self.database}\"\n\n return ret_connection_string",
"def _get_connection_string(self, database):\r\n return ('ODBC;DNS=;DRIVER={Client Access ODBC Driver (32-bit)};'\r\n 'SYSTEM=%s;CMT=0;DBQ=%s;NAM=%d;DFT=5;DSP=1;TFT=0;TSP=0;DEC=0;'\r\n 'XDYNAMIC=0;UID=%s;PWD=%s;RECBLOCK=2;BLOCKSIZE=512;SCROLLABLE=0;'\r\n 'TRANSLATE=0;LAZYCLOSE=0;LIBVIEW=0;REMARKS=0;CONNTYPE=0;SORTTYPE=2;'\r\n 'SORTWEIGHT=1;LANGUAGEID=ENG;PREFETCH=0;DFTPKGLIB=' \r\n % (self._sys_name, database, self._delimiters[self._sql_delimiter], self._user, self._pwd))",
"def _get_connect_string(backend,\n user=\"openstack_citest\",\n passwd=\"openstack_citest\",\n database=\"openstack_citest\"):\n if backend == \"mysql\":\n backend = \"mysql+mysqldb\"\n elif backend == \"postgres\":\n backend = \"postgresql+psycopg2\"\n\n return (\"%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s\"\n % {'backend': backend, 'user': user, 'passwd': passwd,\n 'database': database})",
"def create_db_conn_string(db_config):\n db_config = {\n 'host': environ['DB_HOST'] if 'DB_HOST' in environ else db_config['Host'],\n 'name': environ['DB_NAME'] if 'DB_NAME' in environ else db_config['Name'],\n 'username': environ['DB_USERNAME'] if 'DB_USERNAME' in environ \\\n else db_config['User'],\n 'password': environ['DB_PASSWORD'] if 'DB_PASSWORD' in environ \\\n else db_config['Password']\n }\n\n return f'host={db_config[\"host\"]} user={db_config[\"username\"]} ' \\\n f'password={db_config[\"password\"]} dbname={db_config[\"name\"]}'",
"def connection_string(db_type, database, host='localhost', port=None, username=None, password=None):\n if db_type == 'mysql':\n if port:\n return f\"mysql://{username}:{password}@{host}:port/{database}\"\n else:\n return f\"mysql://{username}:{password}@{host}/{database}\"\n\n if db_type == 'sqlite':\n return f'sqlite:///{database}'",
"def _adjust_connection_URL(self, text):\n dbname = self.options.db\n parts = text.split('/')\n\n # Preserve the quotes if present\n if parts[-1].endswith(\"'\"):\n dbname += \"'\"\n\n parts[-1] = dbname\n return '/'.join(parts)",
"def get_connection_string(self):\n auth = ''\n if self.user:\n auth = self.user\n if self.password:\n auth = auth + ':' + self.password\n if auth:\n auth += '@'\n conn_string = 'postgresql://{auth}{host}/{dbname}'.format(\n auth=auth, host=self.host, dbname=self.dbname)\n\n return conn_string",
"def get_connection_string(self):\n auth = ''\n if self._user:\n auth = self._user\n if self._password:\n auth = auth + ':' + self._password\n if auth:\n auth += '@'\n conn_string = 'postgresql://{auth}{host}/{dbname}'.format(\n auth=auth, host=self._hostname, dbname=self._dbname)\n\n return conn_string",
"def fulldbname(self):\n return 'myfls_'+self.user.username+'_'+self.dbname",
"def _format_database(self, request: DatabaseSecretRequest,\n secret: Secret) -> str:\n username, password = secret.value\n return f'{request.engine}://{username}:{password}@' \\\n f'{request.host}:{request.port}/{request.database}?' \\\n f'{request.params}'",
"def connection_string(self):\n return \"postgresql+psycopg2://%s:%s@%s:%s/%s\" % \\\n (os.environ['DB_USER'],\n os.environ['DB_PASSWORD'],\n os.environ['DB_HOST'],\n os.environ['DB_PORT'],\n self.database_name)",
"def build_db_uri() -> str:\n if DatabaseConfig.uri:\n return DatabaseConfig.uri\n\n return (\n f\"postgresql://{DatabaseConfig.username}:{DatabaseConfig.password}\"\n f\"@{DatabaseConfig.host}:{DatabaseConfig.port}/{DatabaseConfig.database}\"\n )",
"def get_connection_string():\n connection_string = 'postgresql://' + config.GM_DB_USER + \":\" + config.GM_DB_PASSWORD + \"@\" \\\n + config.GM_DB_HOST + \":\" + config.GM_DB_PORT + \"/\" + config.GM_DB_NAME\n return connection_string",
"def _get_snowflake_connection_string() -> str:\n sfUser = os.environ.get(\"SNOWFLAKE_USER\") # noqa: TID251\n sfPswd = os.environ.get(\"SNOWFLAKE_PW\") # noqa: TID251\n sfAccount = os.environ.get(\"SNOWFLAKE_ACCOUNT\") # noqa: TID251\n sfDatabase = os.environ.get(\"SNOWFLAKE_DATABASE\") # noqa: TID251\n sfSchema = os.environ.get(\"SNOWFLAKE_SCHEMA\") # noqa: TID251\n sfWarehouse = os.environ.get(\"SNOWFLAKE_WAREHOUSE\") # noqa: TID251\n sfRole = os.environ.get(\"SNOWFLAKE_ROLE\") or \"PUBLIC\" # noqa: TID251\n\n url = f\"snowflake://{sfUser}:{sfPswd}@{sfAccount}/{sfDatabase}/{sfSchema}?warehouse={sfWarehouse}&role={sfRole}\"\n\n return url",
"def build_db_uri() -> str:\n\n return \"{DB_DRIVER}://{DB_USERNAME}:{DB_PASSWD}@{DB_HOST}:{DB_PORT}/{DB_NAME}\".format(**{\n 'DB_DRIVER': os.environ.get('DB_DRIVER', ''),\n 'DB_HOST': os.environ.get('DB_HOST', ''),\n 'DB_PORT': os.environ.get('DB_PORT', ''),\n 'DB_NAME': os.environ.get('DB_NAME', ''),\n 'DB_USERNAME': os.environ.get('DB_USERNAME', ''),\n 'DB_PASSWD': os.environ.get('DB_PASSWD', '')\n })",
"def get_sqldb_dsn(vcap_services):\n parsed = json.loads(vcap_services)\n credentials = parsed[\"sqldb\"][0][\"credentials\"]\n user = credentials[\"username\"]\n password = credentials[\"password\"]\n host = credentials[\"hostname\"]\n port = credentials[\"port\"]\n dbname = credentials[\"db\"]\n dsn = \"\"\"DATABASE={};HOSTNAME={};PORT={};UID={};PWD={};\"\"\".format(dbname, host, port, user, password)\n return dsn",
"def return_connection(db_key):\n\n config = load_config()\n\n db = config[db_key][\"database_name\"]\n user = config[db_key][\"database_user\"]\n host = \"localhost\"\n password = config[db_key][\"database_pass\"]\n\n return \"\"\"dbname=%s user=%s password=%s host=%s\"\"\" % (db, user, password,\n host)",
"def database_uri(env):\r\n return (\r\n \"{dialect}+{driver}://{user}:{password}@{host}:{port}/{database}?charset={charset}\"\r\n ).format(\r\n dialect=configfile.get(\"database\", \"dialect\"),\r\n driver=configfile.get(\"database\", \"driver\"),\r\n user=configfile.get(\"database\", \"username\"),\r\n password=configfile.get(\"database\", \"password\"),\r\n host=configfile.get(\"database\", \"host\"),\r\n port=configfile.get(\"database\", \"port\"),\r\n database=configfile.get(\"database\", \"database\"),\r\n charset=configfile.get(\"database\", \"charset\"),\r\n )",
"def __str__(self):\n if self.dialect == \"sqlite\":\n db_conn = self._sqlite()\n elif self.dialect.startswith(\"postgres\"):\n db_conn = self._postgresql()\n elif self.dialect == \"mysql\":\n db_conn = self._mysql()\n else:\n raise ValueError(\"Database dialect not supported\")\n self._test_connection(db_conn)\n return db_conn",
"def get_connection_string(src_or_dest):\n if src_or_dest == \"src\":\n try:\n SQL_SERVER = os.environ[\"CROP_SRC_SQL_SERVER\"]\n SQL_PASSWORD = os.environ[\"CROP_SRC_SQL_PASS\"]\n except:\n print(\n \"Need to set environment variables CROP_SRC_SQL_SERVER, CROP_SRC_SQL_PASS\"\n )\n return None\n elif src_or_dest == \"dest\":\n try:\n SQL_SERVER = os.environ[\"CROP_DEST_SQL_SERVER\"]\n SQL_PASSWORD = os.environ[\"CROP_DEST_SQL_PASS\"]\n except:\n print(\n \"Need to set environment variables CROP_DEST_SQL_SERVER, CROP_DEST_SQL_PASS\"\n )\n return None\n else:\n print(\"Error: need to specify 'src' or 'dest'\")\n SQL_USERNAME = os.environ[\"CROP_SQL_USERNAME\"]\n SQL_USER = f\"{SQL_USERNAME}@{SQL_SERVER}\"\n SQL_HOST = f\"{SQL_SERVER}.postgres.database.azure.com\"\n SQL_CONNECTION_STRING = \"%s://%s:%s@%s:%s\" % (\n SQL_ENGINE,\n SQL_USER,\n parse.quote(SQL_PASSWORD),\n SQL_HOST,\n SQL_PORT,\n )\n return SQL_CONNECTION_STRING",
"def _dbconnect(self):\n user = mm_cfg.STORM_MEMBER_DB_USER\n password = mm_cfg.STORM_MEMBER_DB_PASS\n host = mm_cfg.STORM_MEMBER_DB_HOST\n dbname = mm_cfg.STORM_MEMBER_DB_NAME\n\n\n db = 'postgres://'+user+':'+password+'@'+host+'/'+dbname\n return create_database(db)",
"def getDatabaseConnection(self):\n \n strConn = \"dbname='\" + self.__dbname + \"' user='\" + self.__user + \"' host='\" + self.__host + \"' port='\" + self.__port + \"' password='\" + self.__password + \"'\"\n return strConn",
"def _conn_str_with_database(conn_str, dbname):\n dbname_clause = 'dbname={}'.format(dbname)\n if 'dbname=' in conn_str:\n new_conn_str = re.sub(r'dbname=([^ ]+)', dbname_clause, conn_str)\n else:\n new_conn_str = conn_str + ' ' + dbname_clause\n return new_conn_str",
"def make_dsn(settings=django_settings):\r\n\r\n try:\r\n dsn = {\r\n 'mysql': 'mysql',\r\n 'postgresql': 'postgres',\r\n 'postgresql_psycopg2': 'postgres',\r\n 'sqlite3': 'sqlite',\r\n }[settings.DATABASE_ENGINE]\r\n except:\r\n dsn = settings.DATABASE_ENGINE\r\n dsn += '://'\r\n\r\n if settings.DATABASE_USER:\r\n dsn += settings.DATABASE_USER\r\n dsn += ':'\r\n\r\n if settings.DATABASE_PASSWORD:\r\n dsn += settings.DATABASE_PASSWORD\r\n dsn += '@'\r\n\r\n if settings.DATABASE_HOST:\r\n dsn += settings.DATABASE_HOST\r\n dsn += ':'\r\n\r\n if settings.DATABASE_PORT:\r\n dsn += '%s' % settings.DATABASE_PORT\r\n\r\n if settings.DATABASE_NAME:\r\n dsn += '/' + settings.DATABASE_NAME\r\n\r\n return dsn",
"def get_db_conn(server, database, version='sde.DEFAULT'):\n scratch_work = arcpy.env.scratchFolder\n conn_name = 'temp__{}_{}'.format(server, database)\n conn_path = '{}//{}.sde'.format(scratch_work, conn_name)\n\n with TempOverwrite():\n arcpy.CreateDatabaseConnection_management(\n scratch_work,\n conn_name,\n database_platform='SQL_SERVER',\n instance=server,\n account_authentication='OPERATING_SYSTEM_AUTH',\n database=database,\n version=version\n )\n\n return conn_path",
"def getDefaultDBName() -> str:\n return f\"dbname={getpass.getuser()}\"",
"def getDatabaseName(self):\n return f\"n{self.name.capitalize()}\"",
"def get_coonection(login, password):\n conn = pymssql.connect(\n host=\"130.193.44.114\",\n user=login,\n password=password,\n database='dwh'\n )\n \n return conn"
] | [
"0.7096417",
"0.6597485",
"0.65960795",
"0.645154",
"0.64452803",
"0.61906487",
"0.61793053",
"0.61535686",
"0.6109083",
"0.6074845",
"0.60448676",
"0.60006577",
"0.59533644",
"0.5940615",
"0.58933663",
"0.58868223",
"0.58568895",
"0.58527315",
"0.57882077",
"0.57715565",
"0.57636344",
"0.57438165",
"0.5738393",
"0.56863815",
"0.5677623",
"0.562898",
"0.56083626",
"0.5577109",
"0.5570848",
"0.5541631"
] | 0.76311576 | 0 |
Redirect to the 'get_absolute_url' of an Entry, according to the 'object_id' argument | def entry_shortlink(request, object_id):
entry = get_object_or_404(Entry, pk=object_id)
return redirect(entry, permanent=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def relative_view_on_site(request, content_type_id, object_id):\n try:\n content_type = ContentType.objects.get(pk=content_type_id)\n if not content_type.model_class():\n raise http.Http404(\"Content type %s object has no associated model\" % content_type_id)\n obj = content_type.get_object_for_this_type(pk=object_id)\n except (ObjectDoesNotExist, ValueError):\n raise http.Http404(\"Content type %s object %s doesn't exist\" % (content_type_id, object_id))\n try:\n return http.HttpResponseRedirect(obj.get_absolute_url())\n except AttributeError:\n raise http.Http404(\"%s objects don't have get_absolute_url() methods\" % content_type.name)",
"def futon(req, object_id):\n return HttpResponseRedirect(futon_url(object_id))",
"def _redirect_implementation(request, model, b36_encoded_pk):\n endpoint = get_object_or_404(model, pk=base36_to_int(b36_encoded_pk))\n shorturl_redirect.send(sender=model, instance=endpoint, user=request.user)\n return endpoint.url",
"def render_url(self, object_id):\r\n return reverse(\"%s:insert_%s_%s_render\" % (\r\n self.admin_site.name,\r\n self.model._meta.app_label,\r\n self.model._meta.module_name\r\n ), args=(object_id,))",
"def get(self, request, **kwargs):\n self.object = self.get_object()\n if self.request.path != self.object.get_absolute_url():\n return redirect(self.object)\n else:\n context = self.get_context_data(object=self.object)\n return self.render_to_response(context)",
"def forward_to(id):\n\n db = init_connection_engine()\n\n if id == 'short_URL':\n return redirect(url_for('index'))\n else:\n # Looking up the URL by its ID in the DB.\n try:\n # Using a with statement ensures that the connection is always released\n # back into the pool at the end of statement (even if an error occurs).\n with db.connect() as conn:\n lookup_url = \"SELECT url_data FROM url_list WHERE url_id='\" + id + \"';\"\n target_url = conn.execute(lookup_url).fetchone()\n # If target URL is not found.\n if not target_url:\n flash('Not found')\n return redirect(url_for('index'))\n # If something goes wrong.\n except:\n flash('Something went wrong')\n return redirect(url_for('index'))\n\n return redirect(target_url[0])",
"def get_absolute_url(self):\n\n return reverse('kid-detail', args=[str(self.id)])",
"def get_content_object_url(self):\n return urlresolvers.reverse(\n \"reviews-url-redirect\",\n args=(self.content_type_id, self.object_pk)\n )",
"def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])",
"def redirect_to_url(request, short_url):\n try:\n url = Url.objects.get(short_url=short_url)\n except Url.DoesNotExist:\n raise Http404()\n else:\n return HttpResponseRedirect(url.url)",
"def get_absolute_url(self):\n return reverse('postdetail', args=[str(self.pk)])",
"def url_to_edit(obj):\n return reverse(\n 'admin:%s_%s_change' % (obj._meta.app_label, obj._meta.model_name),\n args=[obj.id]\n )",
"def get_absolute_url(self):\n return reverse('blogpost-detail', args=[str(self.id)])",
"def link_redirect(request, shortened_url: str):\n try:\n url = Url.objects.get(short_url=shortened_url)\n long_url = url.long_url\n return HttpResponseRedirect(long_url)\n except Url.DoesNotExist or TypeError:\n return HttpResponseBadRequest(\"Wrong url\")",
"def get_absolute_url(self):\n return reverse('blogger-detail', args=[str(self.id)])",
"def _go_to_page(self, object_id=None, **kwargs):\n\n if kwargs and object_id:\n raise Exception(\"Specify an object id or keyword arguments, but not both\")\n\n if kwargs:\n # note: this will raise an exception if no object is found,\n # or if multiple objects are found.\n object_id = self._get_object(**kwargs)[\"Id\"]\n\n url_template = \"{root}/lightning/r/{object_name}/{object_id}/view\"\n url = url_template.format(\n root=self.cumulusci.org.lightning_base_url,\n object_name=self.object_name,\n object_id=object_id,\n )\n self.selenium.go_to(url)\n self.salesforce.wait_until_loading_is_complete()",
"def get_absolute_url(self):\n return reverse('blog-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('quotes:detail', kwargs={'pk': self.pk})",
"def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])",
"def get_absolute_url(self):\n\t\treturn reverse('source-detail', args=[str(self.id)])",
"def get_success_url(self):\n return reverse('blog-detail', kwargs={'pk': self.kwargs['pk'], })",
"def get_absolute_url(self):\n return reverse('injury-detail', args=[str(self.id)])",
"def brain_dump_entry(user_brain_dump_id):\n\n return redirect(f\"/brain-dump-details/{user_brain_dump_id}\")",
"def get_absolute_url(self):\n return reverse('bl-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n\t\treturn reverse('author-detail', args=[str(self.id)])",
"def route_view(request, code):\n try:\n instance = get_object_or_404(ShortUrl, url_code=code)\n return redirect(instance.long_url, permanent=True)\n except Http404:\n return redirect('/', permanent=True)",
"def edit_redirect_url(self):\n return url_for(self.edit_redirect_to_view)",
"def get_absolute_url(self):\n return reverse('', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('', args=[str(self.id)])"
] | [
"0.72540265",
"0.65143955",
"0.65035427",
"0.6472303",
"0.6195901",
"0.6094221",
"0.60339874",
"0.60005844",
"0.59937936",
"0.59699005",
"0.59617144",
"0.5954032",
"0.59317863",
"0.59166944",
"0.58625966",
"0.5861316",
"0.58577865",
"0.58354175",
"0.5827268",
"0.5827268",
"0.58133787",
"0.5798689",
"0.5796911",
"0.57948947",
"0.57871604",
"0.5783671",
"0.57680106",
"0.57533437",
"0.5743323",
"0.5743323"
] | 0.7944688 | 0 |
Converts the type (cls) to a class | def get_class(cls):
class Foo(object):
def __init__(self):
pass
x = Foo()
x.__class__ = cls
return x.__class__ | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _class(self, class_):\r\n\r\n if class_:\r\n if hasattr(class_, '__mro__'):\r\n #this is a class\r\n return class_\r\n else:\r\n #this is an instance\r\n return type(class_)",
"def _class(self, *args):\r\n\r\n if hasattr(args[0], '__mro__'):\r\n #this is a class\r\n return args[0]\r\n else:\r\n #this is an instance\r\n return type(args[0])",
"def load_cls(node):\n return node.get_attr(Type).load()",
"def type(cls):",
"def _get_class(self, item):\n\t\t# it's already a class, return it\n\t\tif type(item) == type:\n\t\t\treturn item\n\n\t\t# get the class\n\t\treturn item.__class__",
"def serialize_cls(cls):\n return _create_wrapper_cls(cls)",
"def _get_cls(name, cls):\n return cls.get(name, None) if isinstance(cls, dict) else cls",
"def unclass(self, t):\n if isinstance(t, pytd.ClassType):\n # When t.name and t.cls.name differ (e.g., int vs. builtins.int), the\n # latter is the complete name.\n return pytd.NamedType(t.cls.name)\n else:\n return t",
"def get_cls_for(obj_type):\n return {\n \"workflow\": Workflow\n }[obj_type]",
"def ifc_class(cls):\n classes[cls.__name__.upper()] = cls\n return cls",
"def get_obj_class(self, obj_type: str) -> Type[TgnObject]:\n pass",
"def obj_as_class(obj, new_cls, *args, **kwargs):\n obj_typ = type(obj)\n if obj_typ is bool:\n # HURF DURF MY NAME IS PYTHON AND I CAN'T SUBCLASS bool.\n obj_typ = int\n\n class _Class(obj_typ, new_cls):\n __doc__ = new_cls.__doc__\n\n def __init__(self, obj, *args, **kwargs):\n obj_typ.__init__(self, obj)\n new_cls.__init__(self, *args, **kwargs)\n def __new__(cls, obj, *args, **kwargs):\n return obj_typ.__new__(cls, obj)\n\n\n return _Class(obj, *args, **kwargs)",
"def getclass(instance_or_cls):\n return instance_or_cls if inspect.isclass(instance_or_cls) \\\n else instance_or_cls.__class__",
"def get_class(self, name: str) -> Type:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'new instance of {name}')\n name = self.default_name if name is None else name\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'creating instance of {name}')\n class_name, params = self._class_name_params(name)\n return self._find_class(class_name)",
"def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)",
"def test_returns_class(self):\n assert type is simple_class().__class__",
"def find_class(self, class_name: str) -> Type:\n pass",
"def get_type_from_string(cls_path: str) -> Type:\n module_name, class_name = cls_path.rsplit(\".\", 1)\n return getattr(import_module(module_name), class_name)",
"def class_casting(obj: object, cls: type):\n orig_cls = obj.__class__\n obj.__class__ = cls\n yield\n obj.__class__ = orig_cls",
"def __class__(self, ???):",
"def db_to_class(cls, record):\n raise NotImplementedError()",
"def type(self) -> Type[ClassType]:\n return self._type",
"def stringToClass(cls_str):\n import_stg1 = cls_str.split(\" \")[1]\n import_stg2 = import_stg1.replace(\"'\", \"\")\n import_stg3 = import_stg2.replace(\">\", \"\")\n import_parse = import_stg3.split(\".\")\n cls = import_parse[-1]\n import_path = '.'.join(import_parse[:-1])\n import_statement = \"from %s import %s\" % (import_path, cls)\n exec(import_statement)\n assign_statement = \"this_class = %s\" % cls\n exec(assign_statement)\n return this_class",
"def kind_to_class(klass, kind):\n return getattr(sys.modules['model'], kind, None)",
"def get_class(cls):\n return '{}.{}'.format(cls.__module__, cls.__name__)",
"def jit_class(cls):\n from mindspore import nn\n # Check if cls is of type class.\n if not inspect.isclass(cls):\n raise TypeError(f'Decorator jit_class can only be used for class type, but got {cls}.')\n # Check if cls is nn.Cell.\n if issubclass(cls, nn.Cell):\n raise TypeError(f\"Decorator jit_class is used for user-defined classes and cannot be used for nn.Cell: {cls}.\")\n setattr(cls, '__ms_class__', True)\n return cls",
"def create_class(self):\n temp_class = self.temp('separated.class')\n return temp_class.format(**self.__dict__)",
"def ms_class(cls):\n\n logger.warning(\"'mindspore.ms_class' will be deprecated and removed in a future version. \"\n \"Please use 'mindspore.jit_class' instead.\")\n\n # Check if cls is of type class.\n if not inspect.isclass(cls):\n raise TypeError(f'Decorator ms_class can only be used for class type, but got {cls}.')\n # Check if cls is nn.Cell.\n if issubclass(cls, ms.nn.Cell):\n raise TypeError(f\"Decorator ms_class is used for user-defined classes and cannot be used for nn.Cell: {cls}.\")\n logger.info(f'Found ms_class: {cls}.')\n setattr(cls, '__ms_class__', True)\n return cls",
"def identify_class(self, cls):",
"def of_type(self, class_):\n\n return self.operate(PropComparator.of_type_op, class_)"
] | [
"0.69712764",
"0.69088846",
"0.6750393",
"0.66642344",
"0.6608613",
"0.6541871",
"0.6487506",
"0.6476722",
"0.6397707",
"0.6386939",
"0.6368144",
"0.6345723",
"0.630231",
"0.6275217",
"0.62611365",
"0.6149443",
"0.6148302",
"0.61340964",
"0.61101025",
"0.6106781",
"0.6064178",
"0.6057755",
"0.60363144",
"0.59701073",
"0.5963197",
"0.58885926",
"0.58572674",
"0.5842163",
"0.57978463",
"0.5779567"
] | 0.71117383 | 0 |
Adds newclasses to the list of base classes. Creates a new class for the instance. The original class is in bases[0] | def add_base_classes(x, newclasses):
bases = list(x.__class__.__bases__)
if bases[0] is object:
bases[0] = x.__class__
if any(x in bases for x in newclasses):
raise PermitError("Cannot insert duplicate classes.")
bases = bases + newclasses
x.__class__ = type(x.__class__.__name__, tuple(bases), x.__dict__)
return newclasses | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_class(self, new_class):\n index = self._counter\n self._counter += 1\n for element in new_class:\n self._class_names[element] = index\n node = self.part[index].append(element)\n self._place[element] = node",
"def with_metaclass(meta, *bases):\r\n return meta(\"NewBase\", bases, {})",
"def with_metaclass(meta, *bases):\r\n return meta(\"NewBase\", bases, {})",
"def with_metaclass(meta, *bases):\n return meta('NewBase', bases, {})",
"def with_metaclass(meta, *bases):\n return meta(\"NewBase\", bases, {})",
"def add_obj_classes(self, new_obj_classes: List[ObjClass]) -> ProjectMeta:\n return self.clone(obj_classes=self.obj_classes.add_items(new_obj_classes))",
"def make_class(attributes, base_classes=()):\r\n \"*** YOUR CODE HERE ***\"",
"def __new__(metacls, name, bases, classdict):\n\n for b in bases:\n if isinstance(b, Final):\n raise TypeError(\n \"type '{0}' is not an acceptable base type\".format(b.__name__)\n )\n return type.__new__(metacls, name, bases, dict(classdict))",
"def resolve_base_classes(classes):\n for cl in classes.values():\n resolved = []\n for base in cl['bases']:\n if base in classes:\n resolved.append(base)\n cl['resolved_bases'] = resolved",
"def remove_base_class(x, cls):\n bases = list(x.__class__.__bases__)\n original_class = bases[0]\n other_classes = bases[1:]\n if cls in other_classes:\n other_classes.remove(cls)\n else:\n raise PermitError(\"Class {0} not in list of base classes {1}\".format(cls, bases))\n if len(other_classes) == 0:\n x.__class__ = original_class\n else:\n x.__class__ = type(x.__class__.__name__, tuple([original_class] + other_classes), x.__dict__)\n return cls",
"def parse_bases(self, node, clsobj):\n bases = []\n for b in node.bases:\n if not (isinstance(b, Name) and b.id == KW_PROCESS_DEF):\n self.current_context = Read(clsobj)\n bases.append(self.visit(b))\n if isinstance(clsobj, dast.Process):\n # try to resolve the base classes:\n for b in bases:\n try:\n pd = self.resolver.find_process_definiton(b)\n clsobj.merge_scope(pd)\n except ResolverException as e:\n self.warn('unable to resolve base class spec, '\n 'compilation may be incomplete: {}.'\n .format(e.reason), e.node if e.node else b)\n return bases",
"def add_class(self, klass):\n if not issubclass(klass, DataClayObject):\n raise DataClayException(\"Can only use DataClayObject classes\")\n\n logger.debug(\"Adding class %s to the MetaClassFactory\", klass)\n class_container = klass._prepare_metaclass(self._namespace, self._responsible_account)\n\n # Save to the list, and bookmark the MetaClass\n # (for valid recursive behaviour, e.g. cycles)\n complete_name = class_container.name\n logger.debug(\"[add_class] Using `%s` as `name` field of Type\", complete_name)\n if complete_name not in self.types:\n self.types[complete_name] = UserType(\n signature=\"L{};\".format(complete_name).replace(\".\", \"/\"),\n includes=[],\n namespace=self._namespace,\n typeName=complete_name,\n )\n self.classes.append(class_container)\n\n parent = klass.__bases__[0]\n if parent is not DataClayObject:\n self.add_class(parent)\n\n logger.debug(\"Class %s finished\", class_container.name)",
"def update_derived_class_records():\n derive_class_hierarchy()",
"def add_base_class(\n existing_object: Any,\n import_method: Callable[[Any], Any],\n export_method: Callable[[Any], Any],\n):\n existing_object.export_data = types.MethodType(export_method, existing_object)\n existing_object.import_data = types.MethodType(import_method, existing_object)",
"def add_base(li):\r\n\t\tnew_li = []\r\n\t\tfor s in li:\r\n\t\t\tfor b in bases:\r\n\t\t\t\tnew_li.append(s+b)\r\n\t\treturn new_li",
"def with_metaclass(meta, base=object):\r\n return meta(\"NewBase\", (base,), {})",
"def _super(cls, bases):\n super = [base for base in bases if isinstance(base, Registered)]\n return super[0] if len(super) else Registered",
"def process_class_list(self, module, classes):",
"def _get_classes(package_name, base_class):\n classes = {}\n\n base_dir = os.getcwd()\n root_module_name = base_dir.split('/')[-1]\n package_dir = base_dir + '/%s' % package_name\n if os.path.isdir(package_dir):\n for module_path in os.listdir(package_dir):\n if not module_path.endswith('.py'):\n continue\n\n module_name = os.path.splitext(module_path)[0]\n module_full_name = '%s.%s.%s' % (root_module_name, package_name, module_name)\n __import__(module_full_name)\n work_module = sys.modules[module_full_name]\n for module_item in work_module.__dict__.values():\n if type(module_item) is type \\\n and issubclass(module_item, base_class) \\\n and module_item is not base_class\\\n and hasattr(module_item, 'name') and module_item.name:\n classes.setdefault(module_item.name, []).append(module_item)\n\n # check no duplicated names\n for work_name, work_modules in classes.items():\n if len(work_modules) > 1:\n raise DuplicatedNameException('Modules %s have same name \"%s\"' % (\n ' and '.join(map(str, work_modules)),\n work_name\n ))\n\n # create immutable list of modules\n return tuple([(work_name, work_modules[0]) for work_name, work_modules in classes.items()])",
"def move_to_new_class(self, elements_to_move):\n for element in elements_to_move:\n place = self._place[element]\n place.delete()\n self.add_class(elements_to_move)",
"def register_classes():\n CoaddSplit.register_class()\n CoaddSplit_SG.register_class()",
"def with_metaclass(meta, base=object):\n return meta(\"NewBase\", (base,), {})",
"def with_metaclass(meta, base=object):\n return meta(\"NewBase\", (base,), {})",
"def get_intermediate_classes(cls, baseclass):\n classes = inspect.getmro(cls)\n classes = [c for c in classes if issubclass(c, baseclass)]\n return classes",
"def add_obj_class(self, new_obj_class: ObjClass) -> ProjectMeta:\n return self.add_obj_classes([new_obj_class])",
"def __new__(cls, name):\n for sub_cls in cls.__subclasses__():\n if sub_cls.__name__ == name:\n return super().__new__(sub_cls)",
"def get_class(name, base_class, *modules):\n for cls in iter_classes(base_class, *modules, class_filter=lambda x: x.__module__.split('.')[-1] == name):\n return cls\n return None",
"def register_classes(\n self, module: ModuleType, base_cls: Type, override: bool = False, show_deprecation: bool = True\n ) -> None:\n for cls in self.get_members(module, base_cls):\n self(cls=cls, override=override, show_deprecation=show_deprecation)",
"def __new__(cls, name, bases, attrs):\n super_new = super(ClientMeta, cls).__new__\n\n # Create the class.\n module = attrs.pop('__module__')\n base_attrs = {'__module__': module,\n '_exchanges':[],\n '_queues':[],\n '_consumers':[],\n '_tasks':[],\n }\n new_class = super_new(cls, name, bases, base_attrs)\n\n for obj_name, obj in attrs.items():\n new_class.add_to_class(obj_name, obj)\n\n return new_class",
"def __new__(meta, classname, bases, classDict):\n obj = super(TestMetaClass, meta).__new__(meta, classname, bases, classDict)\n print(('TestMetaClass.__new__ called. got new obj id=0x%x' % id(obj)))\n return obj"
] | [
"0.66618234",
"0.64001256",
"0.64001256",
"0.6344961",
"0.6301278",
"0.62983423",
"0.6222371",
"0.6013382",
"0.5826684",
"0.5763189",
"0.57119316",
"0.57008797",
"0.56934845",
"0.56647617",
"0.5627335",
"0.5617749",
"0.5607377",
"0.5586276",
"0.55615836",
"0.5545617",
"0.5525693",
"0.5521772",
"0.5521772",
"0.54828435",
"0.5419644",
"0.5409919",
"0.5393596",
"0.53854865",
"0.5366166",
"0.53614545"
] | 0.82117367 | 0 |
Removes base classes. If there are no more base classes, returns the original class slotted at bases[0] | def remove_base_class(x, cls):
bases = list(x.__class__.__bases__)
original_class = bases[0]
other_classes = bases[1:]
if cls in other_classes:
other_classes.remove(cls)
else:
raise PermitError("Class {0} not in list of base classes {1}".format(cls, bases))
if len(other_classes) == 0:
x.__class__ = original_class
else:
x.__class__ = type(x.__class__.__name__, tuple([original_class] + other_classes), x.__dict__)
return cls | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mro(cls): # pragma: no cover\n if hasattr(cls, 'mro'):\n return cls.__mro__\n\n def _mro(cls):\n m = [cls]\n for base in cls.__bases__:\n m += _mro(base)\n return m\n\n mro_list = _mro(cls)\n\n # If a class appears multiple times (due to multiple inheritance) remove\n # all but the last occurence. This matches the method resolution order\n # of MicroPython, but not CPython.\n mro_pruned = []\n for i in range(len(mro_list)):\n base = mro_list.pop(0)\n if base not in mro_list:\n mro_pruned.append(base)\n return mro_pruned",
"def _super(cls, bases):\n super = [base for base in bases if isinstance(base, Registered)]\n return super[0] if len(super) else Registered",
"def get_class(name, base_class, *modules):\n for cls in iter_classes(base_class, *modules, class_filter=lambda x: x.__module__.split('.')[-1] == name):\n return cls\n return None",
"def get_bases(self):\n return self.py_class.__bases__",
"def resolve_base_classes(classes):\n for cl in classes.values():\n resolved = []\n for base in cl['bases']:\n if base in classes:\n resolved.append(base)\n cl['resolved_bases'] = resolved",
"def add_base_classes(x, newclasses):\n bases = list(x.__class__.__bases__)\n if bases[0] is object:\n bases[0] = x.__class__\n if any(x in bases for x in newclasses):\n raise PermitError(\"Cannot insert duplicate classes.\")\n bases = bases + newclasses\n x.__class__ = type(x.__class__.__name__, tuple(bases), x.__dict__)\n return newclasses",
"def parse_bases(self, node, clsobj):\n bases = []\n for b in node.bases:\n if not (isinstance(b, Name) and b.id == KW_PROCESS_DEF):\n self.current_context = Read(clsobj)\n bases.append(self.visit(b))\n if isinstance(clsobj, dast.Process):\n # try to resolve the base classes:\n for b in bases:\n try:\n pd = self.resolver.find_process_definiton(b)\n clsobj.merge_scope(pd)\n except ResolverException as e:\n self.warn('unable to resolve base class spec, '\n 'compilation may be incomplete: {}.'\n .format(e.reason), e.node if e.node else b)\n return bases",
"def remove_subclass_objects(cls):\n # get the content type\n cont_type = ContentType.objects.get_for_model(cls)\n \n # return only this cont type\n return cls.objects.filter(content_type=cont_type)",
"def get_bases(self):\n # TODO: subclassing\n return (self.py_class,)",
"def autodoc_process_bases(app, name, obj, options, bases):\n # Determine the bases to be removed\n remove_bases = []\n for base in bases:\n if base.__name__[0] == \"_\" or \"Mixin\" in base.__name__:\n remove_bases.append(base)\n\n # Remove from the bases list in-place\n for base in remove_bases:\n bases.remove(base)",
"def _get_all_bases(class_or_name: Union[str, Type]) -> List[str]:\n if isinstance(class_or_name, str):\n return [class_or_name]\n\n classes = [class_or_name.__name__]\n for base in class_or_name.__bases__:\n classes.extend(_get_all_bases(base))\n\n return deduplicate(classes)",
"def get_intermediate_classes(cls, baseclass):\n classes = inspect.getmro(cls)\n classes = [c for c in classes if issubclass(c, baseclass)]\n return classes",
"def child_class(class_types: Collection[type], base_class: type) -> type | None:\n subclasses = set()\n for class_type in class_types:\n if class_type is base_class:\n continue\n if issubclass(class_type, base_class):\n subclasses.add(class_type)\n\n if len(subclasses) == 0:\n return None\n elif len(subclasses) == 1:\n return subclasses.pop()\n else:\n # If more than one class is a subclass of `base_class`\n # It is possible that one or more classes are subclasses of another\n # class (see example above).\n # Recursively find the child-most class. Break ties by returning any\n # child-most class.\n for c in subclasses:\n child = child_class(subclasses, c)\n if child is not None:\n return child\n return subclasses.pop()",
"def GetClassBases(self,cls):\n name = \"\"\n for i in cls:\n if i != \")\":\n name+=i\n\n lst = name.split(\"(\")\n cls_lst = lst[-1].split(\",\")\n if cls_lst:\n return cls_lst\n else:\n return False",
"def get_all_superclasses(cls):\n classes = []\n for superclass in cls.__bases__:\n for c in get_all_superclasses(superclass):\n if c is not object and c not in classes:\n classes.append(c)\n for superclass in cls.__bases__:\n if superclass is not object and superclass not in classes:\n classes.append(superclass)\n\n return classes",
"def deregister_class(self, name):\n isbase = name in self.base_types\n if not isbase and name not in self.template_types:\n _raise_type_error(name)\n if isbase:\n self.base_types.remove(name)\n else:\n self.template_types.pop(name, None)\n\n self.cython_ctypes.pop(name, None)\n self.cython_cytypes.pop(name, None)\n self.cython_pytypes.pop(name, None)\n self.from_pytypes.pop(name, None)\n self.cpp_types.pop(name, None)\n self.humannames.pop(name, None)\n self.cython_cimports.pop(name, None)\n self.cython_cyimports.pop(name, None)\n self.cython_pyimports.pop(name, None)\n\n self.cython_c2py_conv.pop(name, None)\n self.cython_py2c_conv.pop(name, None)\n self.cython_classnames.pop(name, None)\n\n self.clearmemo()",
"def _removeBasesClicked(self):\n part = self._modelPart\n stepSize = part.stepSize()\n # first find out the right edge of the part\n idx = part.indexOfRightmostNonemptyBase()\n # next snap to a multiple of stepsize\n idx = int(ceil(float(idx+1)/stepSize))*stepSize\n # finally, make sure we're a minimum of stepSize bases\n idx = util.clamp(idx, part.stepSize(), 10000)\n delta = idx - (part.maxBaseIdx() + 1)\n if delta < 0:\n part.resizeVirtualHelices(0, delta)\n if app().isInMaya():\n import maya.cmds as cmds\n cmds.select(clear=True)",
"def _classes_(cls):\n for base_cls in cls.__bases__:\n # Avoid infinite loop\n if base_cls == Sandbox:\n continue\n\n yield base_cls",
"def derived_classes(self, what: Union[GDScriptClass, str, int]):\n base_cls: Optional[GDScriptClass] = None\n if isinstance(what, GDScriptClass):\n base_cls = what\n else:\n base_cls = self.get_class(what)\n\n for cls in self._classes_by_type_id.values():\n if cls.base == base_cls:\n yield cls",
"def clearClasses(self):\n clearcls = self._clearClasses\n evclass = getattr(self, \"eventClass\", None)\n sev = getattr(self, 'severity', None)\n if evclass and sev == 0: \n clearcls.append(self.eventClass)\n\n # collapse out duplicates\n clearcls = list(set(clearcls))\n return clearcls",
"def all_bases(cls, bases=None):\n return cls._ALLOWED_BASES if not bases else bases",
"def get_flat_bases():\n\trs = []\n\tfor bs in get_bases():\n\t\tfor b in bs:\n\t\t\trs.append(b)\n\treturn rs",
"def parent_class_modules(cls):\n if not issubclass(cls, spack.package_base.PackageBase) or issubclass(\n spack.package_base.PackageBase, cls\n ):\n return []\n result = []\n module = sys.modules.get(cls.__module__)\n if module:\n result = [module]\n for c in cls.__bases__:\n result.extend(parent_class_modules(c))\n return result",
"def base_cls(self):\n return self.base()",
"def _get_all_loaded_classes(self):\n classes = {}\n for module in self.modules.values():\n for k,v in module.__dict__.items():\n # skip anything that's not a game class\n if not type(v) is type:\n continue\n base_classes = (game_object.GameObject, game_hud.GameHUD, game_room.GameRoom)\n # TODO: find out why above works but below doesn't!! O___O\n #base_classes = self.builtin_base_classes\n if issubclass(v, base_classes):\n classes[k] = v\n return classes",
"def inheritors(cls):\n subclasses = set()\n work = [cls]\n while work:\n parent = work.pop()\n for child in parent.__subclasses__():\n if child not in subclasses:\n subclasses.add(child)\n work.append(child)\n return subclasses",
"def getleafsubclasses(cls):\n scls = itersubclasses(cls)\n return [s for s in scls if not s.__subclasses__()]",
"def _get_classes(package_name, base_class):\n classes = {}\n\n base_dir = os.getcwd()\n root_module_name = base_dir.split('/')[-1]\n package_dir = base_dir + '/%s' % package_name\n if os.path.isdir(package_dir):\n for module_path in os.listdir(package_dir):\n if not module_path.endswith('.py'):\n continue\n\n module_name = os.path.splitext(module_path)[0]\n module_full_name = '%s.%s.%s' % (root_module_name, package_name, module_name)\n __import__(module_full_name)\n work_module = sys.modules[module_full_name]\n for module_item in work_module.__dict__.values():\n if type(module_item) is type \\\n and issubclass(module_item, base_class) \\\n and module_item is not base_class\\\n and hasattr(module_item, 'name') and module_item.name:\n classes.setdefault(module_item.name, []).append(module_item)\n\n # check no duplicated names\n for work_name, work_modules in classes.items():\n if len(work_modules) > 1:\n raise DuplicatedNameException('Modules %s have same name \"%s\"' % (\n ' and '.join(map(str, work_modules)),\n work_name\n ))\n\n # create immutable list of modules\n return tuple([(work_name, work_modules[0]) for work_name, work_modules in classes.items()])",
"def base_cls_name(self):\n return self.base().__name__",
"def _get_base_class_names(frame):\n co, lasti = frame.f_code, frame.f_lasti\n code = co.co_code\n i = 0\n extended_arg = 0\n extends = []\n while i <= lasti:\n c = code[i]\n op = ord(c)\n i += 1\n if op >= dis.HAVE_ARGUMENT:\n oparg = ord(code[i]) + ord(code[i + 1]) * 256 + extended_arg\n extended_arg = 0\n i += 2\n if op == dis.EXTENDED_ARG:\n extended_arg = oparg * int(65536)\n if op in dis.hasconst:\n if type(co.co_consts[oparg]) == str:\n extends = []\n elif op in dis.hasname:\n if dis.opname[op] == 'LOAD_NAME':\n extends.append(('name', co.co_names[oparg]))\n if dis.opname[op] == 'LOAD_ATTR':\n extends.append(('attr', co.co_names[oparg]))\n items = []\n previous_item = []\n for t, s in extends:\n if t == 'name':\n if previous_item:\n items.append(previous_item)\n previous_item = [s]\n else:\n previous_item += [s]\n if previous_item:\n items.append(previous_item)\n return items"
] | [
"0.6278506",
"0.5917506",
"0.5868634",
"0.5854781",
"0.58289456",
"0.5770308",
"0.57575125",
"0.5735956",
"0.5680516",
"0.5679301",
"0.56645495",
"0.56414235",
"0.55493724",
"0.55452394",
"0.553261",
"0.55212045",
"0.54923844",
"0.5323664",
"0.5301268",
"0.5242089",
"0.51389015",
"0.50566435",
"0.50541085",
"0.5052239",
"0.5048911",
"0.5021055",
"0.50170225",
"0.49767107",
"0.49654698",
"0.49412057"
] | 0.74186534 | 0 |
Saves samples collected from 3-Axis Accelerometers. Samples should be received in the form (ID X Y Z Time /r/n) with a space to separate the data. Invalid data is rejected and any samples lost this way are recorded and displayed to the user once the function finishes. All samples are collected from the port defined by the serialPort(string) parameter and the function will continue until it saves a number of samples defined by the NO_SAMPLES(int) parameter. All samples will be appended to the list specified by the log(list) parameter. This should either be an empty list or a list with 6 columns. | def collect_samples(serialPort,NO_SENSORS,NO_SAMPLES,log):
run = '1'
badSamples = 0
count = 1
log_temp = []
temp = [0] * 20
NO_FIELDS = (NO_SENSORS * 3) + 1
while (run == '1'):
# If the input buffer is not empty read the data out into rawData using \n as a delimiter.
if (serialPort.inWaiting()>0):
rawData = serialPort.readline()
print(rawData)
            # If invalid data is received this prevents a program crash
try:
# Decode the bytes into a string
data = rawData.decode()
# Split x, y, z and newline values into a list
if (count >= (NO_SAMPLES + 1)):
endTime_temp = data.split(" ", 2)
if (len(endTime_temp) == 2 and '' not in endTime_temp):
endTime = int(endTime_temp[0])
else:
endTime = 780
                        print('Time not received')
print('Lost Samples: ' + str(badSamples))
run = '0'
else:
data_readings = data.split(" ", NO_FIELDS)
print(data_readings)
# A correct sample should contain 16 values and not include null and so this is used
# to validate the data and record any samples that are discarded in this way
if (len(data_readings) == NO_FIELDS and '' not in data_readings):
# Discard newline characters before saving data
int_data_readings = list(map(int,data_readings[:(NO_FIELDS - 1)]))
log_temp.append(int_data_readings)
else:
badSamples += 1
except:
                print('Invalid data received')
count += 1
samplingPeriod = (endTime/NO_SAMPLES)/NO_SENSORS
timeStamp = 0.0
for i in range(0,len(log_temp)):
for j in range(0,NO_SENSORS):
temp[0+(j*4)] = log_temp[i][0+(j*3)]
temp[1+(j*4)] = log_temp[i][1+(j*3)]
temp[2+(j*4)] = log_temp[i][2+(j*3)]
temp[3+(j*4)] = timeStamp
timeStamp += samplingPeriod
log.append(temp.copy()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def record_data(self, no_of_samples, interval):\r\n\r\n #tempory storage while function is completing\r\n temp_return_list = []\r\n\r\n #colec\r\n for i in range(0,no_of_samples):\r\n\r\n print(i)\r\n sensor_value = self.sen.get_sensor_value()\r\n\r\n temp_return_list.append([sensor_value,(i*interval)])\r\n\r\n time.sleep(interval)\r\n\r\n \r\n \r\n self.return_data = temp_return_list",
"def add_sample(self, time_received, current_label, emg_list, accel_1, accel_2, accel_3, gyro_1, gyro_2,\n gyro_3, orient_w, orient_x, orient_y, orient_z):\n\n self.add_data_lock.lock()\n\n self.timestamps.append(time_received)\n self.labels.append(current_label)\n\n for i, emg_channel in enumerate(emg_list):\n self.emg[i].append(emg_channel)\n\n self.accel[0].append(accel_1 / MYOHW_ACCELEROMETER_SCALE)\n self.accel[1].append(accel_2 / MYOHW_ACCELEROMETER_SCALE)\n self.accel[2].append(accel_3 / MYOHW_ACCELEROMETER_SCALE)\n\n self.gyro[0].append(gyro_1 / MYOHW_GYROSCOPE_SCALE)\n self.gyro[1].append(gyro_2 / MYOHW_GYROSCOPE_SCALE)\n self.gyro[2].append(gyro_3 / MYOHW_GYROSCOPE_SCALE)\n\n self.orient[0].append(orient_w / MYOHW_ORIENTATION_SCALE)\n self.orient[1].append(orient_x / MYOHW_ORIENTATION_SCALE)\n self.orient[2].append(orient_y / MYOHW_ORIENTATION_SCALE)\n self.orient[3].append(orient_z / MYOHW_ORIENTATION_SCALE)\n\n self.sync_data(self.is_master)\n\n self.add_data_lock.unlock()",
"def read_live_data(wearable_port):\r\n IMU1_num = []\r\n IMU2_num = []\r\n IMU3_num = []\r\n\r\n try:\r\n wearable = serial.Serial(wearable_port, baudrate=115200, timeout=5)\r\n #arduino = serial.Serial(arduino_port, timeout=1)\r\n # Delay for 2 seconds to wait for serial port to be ready.\r\n print(\"Waiting 2 seconds for serial to be ready.\")\r\n time.sleep(2)\r\n except Exception as e:\r\n print(e)\r\n print('Please check the port')\r\n return\r\n\r\n input(\"Press Enter to continue...\")\r\n str(wearable.write(bytes(33)))\r\n # Open file to store the data; filename includes date and time; format: data-YYYYMMDDHHmmss.csv\r\n filename = \"data-\" + str(dt.datetime.now().strftime(\"%Y%m%d%H%M%S\")) + \".csv\"\r\n filenamplot = \"plot-\" + str(dt.datetime.now().strftime(\"%Y%m%d%H%M%S\")) + \".png\"\r\n print(\"Opening %s\" % filename)\r\n f = open(filename, \"a+\")\r\n # f.write(\"power,rpm\\n\")\r\n count = 1000\r\n # Get data and continuously yield Power and RPM as integers\r\n\r\n while (count >0):\r\n count = count -1\r\n #if arduino.in_waiting > 0:\r\n wearable.flushInput()\r\n\r\n '''\r\n arduino_output = arduino.readline().decode(\"utf_8\", \"strict\")\r\n print(\"Distance: %s\" % arduino_output)\r\n f.writelines(\"%s\" % arduino_output)\r\n if arduino_output == \"Hard Stop\\r\\n\":\r\n break\r\n arduino_output = arduino_output.replace(\"\\r\\n\", \"\")\r\n Distance.append(int(float(arduino_output)))\r\n '''\r\n\r\n try:\r\n data = wearable.readline().decode(\"utf_8\", \"strict\")\r\n data = data.replace(\"\\r\\n\", \"\\n\").split()\r\n IMU1= data[2].replace(\"\\n\", \"\")\r\n IMU1_num.append(int(IMU1))\r\n IMU2 = data[3].replace(\"\\n\", \"\")\r\n IMU2_num.append(int(IMU2))\r\n IMU3 = data[4].replace(\"\\n\", \"\")\r\n IMU3_num.append(int(IMU3))\r\n print(\"IMU1: %s\\t IMU2: %s\\t IMU3: %s\\t\" % (IMU1, IMU2, IMU3))\r\n f.writelines(\"%s,%s,%s,%s\\n\" % (IMU1, IMU2, IMU3))\r\n yield int(IMU1), int(IMU2), int(IMU3)\r\n except Exception as e:\r\n print('error')\r\n f.writelines(\"Error\\n\")\r\n\r\n print('Program ended.')\r\n t = numpy.linspace(1, len(IMU1_num), len(IMU1_num))\r\n fig, (ax1) = plt.subplots(nrows=1, ncols=1, figsize=(16.0, 9.0)) # create figure & 1 axis\r\n ax1.plot(t, IMU1_num, t, IMU2_num,t, IMU3_num)\r\n ax1.set_title('IMU')\r\n ax1.legend(('IMU1', 'IMU2', 'IMU3'))\r\n # manager = plt.get_current_fig_manager()\r\n # manager.resize(*manager.window.maxsize())\r\n fig.savefig(filenamplot)\r\n plt.show()\r\n\r\n f.close()\r\n #arduino.close()\r\n wearable.close()",
"def example_data_file():\n\n header1 = \"#Sample Interval: 0.100000 (seconds)\"\n header2 = \"Timestamp,AccelX,AccelY,RateX,RateY\"\n header3 = \"dd-mmm-yyyy HH:MM:SS.FFF,mm/s2,mm/s2,rad/s,rad/s\"\n\n start_date = dt.datetime(2016, 3, 17, 1, 0, 0)\n\n # Add one tenth of a second\n time_delta = dt.timedelta(0, 0, 0, 100)\n\n # Sample frequency in Hz\n sample_freq = 10\n\n # 20 in event duration in seconds\n Ts = 60 * 20\n\n # Number of points\n N = Ts * sample_freq\n\n # Array of times\n time = [start_date + i * time_delta for i in range(N)]\n time_str = [t.strftime(\"%Y-%m-%d %H:%M:%S.%f\") for t in time]\n\n ax, ay, Rx, Ry = example_data(sample_freq, Ts)\n\n data = [\n \",\".join([time_str[i], str(ax[i]), str(ay[i]), str(Rx[i]), str(Ry[i])]) for i in range(N)\n ]\n\n data.insert(0, header3)\n data.insert(0, header2)\n data.insert(0, header1)\n\n return \"\\n\".join(data)",
"def collect_samples(self):\n self.__running = True\n with open(self.__filename, 'a') as output:\n next_sample_time = time.time()\n while self.__running:\n sensor_name = self.__sensor.get_sensor_type_name()\n sensor_id = self.__sensor.get_sensor_id()\n data = self.__sensor.retrieve_data_string() \n if DEBUG:\n print('data: \"{}\"'.format(data),\n file = sys.stderr, flush=True)\n when = datetime.datetime.now(datetime.timezone.utc).isoformat()\n result = OUTPUT_FORMAT.format(when,\n sensor_name, \n sensor_id, \n data)\n output.write(result)\n output.flush()\n \n next_sample_time = next_sample_time + self.__interval\n delay_time = next_sample_time - time.time()\n if DEBUG:\n print('delay_time = {}'.format(delay_time),\n file=sys.stderr, flush=True)\n \n if 0 < delay_time: # don't sleep if already next sample time\n time.sleep(delay_time)",
"def record_chunk(self):\n data = self.stream.read(nFFT)\n data_array = bytearray(data)\n self.cur_input = []\n for i in range(nFFT):\n amp = struct.unpack('H', data_array[:2])\n for _ in range(2):\n data_array.pop(0)\n self.cur_input.append(amp)",
"def read_and_display_data(hat, samples_per_channel, num_channels): \n \n # Read all of the available samples (up to the size of the read_buffer which\n # is specified by the user_buffer_size). Since the read_request_size is set\n # to -1 (READ_ALL_AVAILABLE), this function returns immediately with\n # whatever samples are available (up to user_buffer_size) and the timeout\n # parameter is ignored.\n total_samples_read = 0\n read_request_size = READ_ALL_AVAILABLE\n completeFlag = 0 \n timeout = 5.0\n \n # file switch: w = Write to a file\n # file switch: w+ = Write to a file, if it doesn't exist create it\n # file switch: a = Append to a file\n # file switch: a+ = Append to a file, if is doesn't exist create it.\n # file switch: x = will create a file, returns an error if the file exist\n \n\n # If the scan starts, create a file name based upon current date and time.\n # Retrieve the Current Working Directory and generate the full path \n # to where to write the collected data as a .csv file. Open the file \n # begin writing the data to the file. When done, close the file.\n \n try:\n if os.path.exists(basepath):\n if not (os.path.exists(mypath)):\n os.mkdir(mypath)\n else:\n os.mkdir(basepath)\n os.chdir(basepath)\n os.mkdir(mypath)\n except OSError as exc:\n raise\n \n os.chdir(mypath)\n fileDateTime = datetime.strftime(datetime.now(), \"(%m_%d_%Y)-(%H-%M-%S)\")\n #filePath = mypath + \"/\" + DAQ_NAME + \"_\" + fileName + \".csv\"\n filePath = mypath + \"/\" + DAQ_NAME + \"_\" + fileDateTime + \".csv\"\n csvfile = open(filePath, \"w+\")\n csvwriter = csv.writer(csvfile) \n \n # Recording LED\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(RECORDING_LED, GPIO.OUT, initial=GPIO.LOW)\n GPIO.output(RECORDING_LED,GPIO.HIGH)\n \n while total_samples_read < samples_per_channel:\n read_result = hat.a_in_scan_read(read_request_size, timeout)\n\n # Check for an overrun error\n if read_result.hardware_overrun:\n print('\\n\\nHardware overrun\\n')\n break\n elif read_result.buffer_overrun:\n print('\\n\\nBuffer overrun\\n')\n break\n elif not (read_result.running and completeFlag == 0):\n completeFlag = 1\n print('\\n (2) Recording Completed - Buffer Draining')\n\n samples_read_per_channel = int(len(read_result.data) / num_channels)\n total_samples_read += samples_read_per_channel\n \n totalSamples = len(read_result.data) \n\n if samples_read_per_channel > 0:\n index = samples_read_per_channel * num_channels - num_channels\n \n new_index = 0\n myArray=[] #create an empty array\n for i in range(0, totalSamples, num_channels):\n myArray.append([]) #add a row to the array (COLUMN)\n for j in range(num_channels):\n\t\t\t\t\t#append a num_channels of data to the array (ROW)\n myArray[new_index].append(read_result.data[i + j]) \n new_index+=1\n\n csvwriter.writerows(myArray) #Write the array to file\n csvfile.flush\n\n # Cleanup\n csvfile.close() \n print('\\n (3) Buffer Drained - Data Saved to CSV File\\n')\n GPIO.cleanup()\n GPIO.setmode(GPIO.BCM)\n \n # Complete LED\n GPIO.setup(COMPLETE_LED, GPIO.OUT, initial=GPIO.LOW)\n GPIO.output(COMPLETE_LED,GPIO.HIGH)\n time.sleep(5)\n GPIO.cleanup()\n hat.a_in_scan_cleanup()\n global CMD_RECEIVED\n CMD_RECEIVED = 1\n \n # Restarts script to prepare for another recording\n main()",
"def record_screen(startX, startY, endX, endY):\n\n # some delay, which allows us to focus game window and start driving\n print(\"Starting collecting data in...\")\n for i in range(3, 0, -1):\n print(i)\n time.sleep(1)\n\n\n # initing joystick\n pygame.init()\n pygame.event.pump()\n pygame.joystick.init()\n joystick = pygame.joystick.Joystick(0)\n joystick.init()\n with open(constants.DATA_FILENAME, 'ab') as f:\n\n\n\n data = []\n\n count = 0 # keeping a counter to know how much data we have collected in this run so far\n\n while (True):\n\n screenshot = ImageGrab.grab(bbox=(startX, startY, endX, endY))\n\n joystick_values = get_joystick_axis_values(joystick)\n joystick_values = data_preprocessing.transform_output_labels(joystick_values)\n\n screenshot = data_preprocessing.transform_image(np.array(screenshot))\n\n data.append([screenshot, joystick_values])\n\n # write to file every time we collect another 1000 data samples\n if (count % 1000 == 0 and count != 0):\n print('Collected data count - {0}.'.format(count))\n pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)\n data = [] # clear the data from memory\n\n count += 1",
"def getData(self):\n self.ser.write(b'g')\n readString = self.ser.readline()\n print(readString)\n readString = readString.decode(\"utf-8\")\n splittedString = readString.split('\\t')\n for i, num in enumerate(splittedString):\n try:\n splittedString[i] = int(float(num))\n except ValueError:\n pass\n self.accString.set('Accleration\\nX: %.5f\\nY: %.5f\\nZ: %.5f' %\n (splittedString[0], splittedString[1],\n splittedString[2]))\n self.logFile.write(readString)\n self.comJob = root.after(10, self.getData)",
"def add_data(self, label, description='', datapath='', samples=[], fibres=[], data_type='', date_created='', verbose = True):\n assert (self.connected)\n assert(type(label) == str)\n assert(type(datapath) == str)\n assert(type(samples) == list and len(samples) <= 4)\n assert(type(fibres) == list and len(fibres) <= 2)\n assert(type(date_created) == str)\n assert('\\n' not in label)\n assert(len(samples) <= 4)\n assert(len(fibres) <= 2)\n \n \n ADD_DATA_COMMAND = (\"INSERT INTO data \"\n \"(label,description, type, data, data_size, data_duration, data_numpoints, sampleId, sampleId2, sampleId3, sampleId4, fibreId, fibreId2, date_created) \"\n \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\")\n \n # get binary data from the file path specified\n data = None\n data_size = 0\n num_data_points = 0\n duration = 0 \n if (datapath != ''):\n data = open(datapath, 'rb').read()\n data_size = path.getsize(datapath)\n if verbose: print(\"File uploaded: \" + str(data_size / 1000.0) + \" KB\")\n \n # get metadata from .csv file\n df = pd.read_csv(datapath)\n num_data_points = len(df) \n if (len(df) > 0):\n if ('time' in df):\n duration = df['time'].values[len(df)-1] - df['time'].values[0] \n elif ('Time' in df): \n duration = df['Time'].values[len(df)-1] - df['Time'].values[0]\n else:\n duration = -1 \n \n # fill in today's date,if none was given\n if (date_created == ''):\n date_created = date.today().strftime(\"%Y-%m-%d\")\n \n # Get sample ids \n sampleIds = []\n if (len(samples)>0 and type(samples[0]) == str):\n for s in samples:\n theId = self.get_by_label(s, 'samples')\n sampleIds.append(None if theId==-1 else theId )\n elif (len(samples)>0 and type(samples[0]) == int):\n sampleIds = samples\n # Ensure sample id list if exactly 4 items long\n sampleIds = [ sampleIds[i] if i<len(sampleIds) else None for i in range(4)]\n \n \n # get fibre ids\n fibreIds = []\n if (len(fibres)>0 and type(fibres[0]) == str):\n for f in fibres:\n theId = self.get_by_label(f, 'fibres')\n fibreIds.append(None if theId==-1 else theId )\n if (len(fibres)>0 and type(fibres[0]) == int):\n fibreIds = fibres\n # Ensure fibre id list if exactly 2 items long\n fibreIds = [ fibreIds[i] if i<len(fibreIds) else None for i in range(2)]\n \n \n new_data = (label, description, data_type, data, data_size, duration, num_data_points, sampleIds[0], sampleIds[1], sampleIds[2], sampleIds[3], fibreIds[0], fibreIds[1], date_created)\n \n \n \n self.cursor.execute(ADD_DATA_COMMAND, new_data)\n \n self.cnx.commit()\n \n \n if verbose: print(\"Data added successfully\")",
"def save_as_csv(path,data,NO_SENSORS):\n\n HEADER1 = [ ['Sensor 1'],\n ['X','Y','Z','Time/ms'] ]\n HEADER2 = [ ['Sensor 1',' ',' ',' ','Sensor 2'],\n ['X','Y','Z','Time/ms','X','Y','Z','Time/ms'] ]\n HEADER3 = [ ['Sensor 1',' ',' ',' ','Sensor 2',' ',' ',' ','Sensor 3'],\n ['X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms'] ]\n HEADER4 = [ ['Sensor 1',' ',' ',' ','Sensor 2',' ',' ',' ','Sensor 3',' ',' ',' ','Sensor 4'],\n ['X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms'] ]\n HEADER5 = [ ['Sensor 1',' ',' ',' ','Sensor 2',' ',' ',' ','Sensor 3',' ',' ',' ','Sensor 4',' ',' ',' ','Sensor 5'],\n ['X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms'] ]\n\n HEADERS = [HEADER1,HEADER2,HEADER3,HEADER4,HEADER5]\n\n HEADER = HEADERS[NO_SENSORS - 1]\n\n # The data is saved as a CSV file using the given path\n with open(path, 'w') as csv_file:\n csv_write = csv.writer(csv_file, dialect='excel')\n csv_write.writerows(HEADER)\n csv_write.writerows(data)",
"def export_data(self):\r\n \r\n \r\n output_file = 'export.csv'\r\n data = self.get_raw_data()\r\n \r\n if data != []:\r\n print('Writing to file', output_file)\r\n with open(output_file, 'w',) as csvfile:\r\n fluorescence_levels = csv.writer(csvfile)\r\n fluorescence_levels.writerow(['sensor_1','Time'])\r\n for i in data:\r\n fluorescence_levels.writerow(i)\r\n print('done')\r\n \r\n else:\r\n print('no recorded data')",
"def logging_data(self):\n with open('sensor_data.log','w') as f:\n json.dump(self.read_continuous_data, f)",
"def dataRecorder(layerNum, squareDifference ,epoch):\n writeStream = open('layer.' + str(layerNum) + '.csv', 'a')\n writeStream.write(str(epoch) + \",\" + str(squareDifference) + \"\\n\")\n writeStream.close()",
"def data_point(inputs: list):\n \n opv = '1'\n \n sample_id = 0\n \n timenow = strftime(\"%#m/%#d/%Y %#H:%M\")\n volts = inputs[0]\n current = inputs[1]\n power = inputs[2]\n \n data_point = [opv, sample_id, timenow, volts, current, power]\n\n if data_point == True:\n sample_id += 1\n \n return data_point",
"def update_data():\n values = temp_serial_placeholder()\n time = current_time_milli() - __start\n points = [ [time, values[0]], [time, values[1]] ]\n __data.append(points)\n return points",
"def record_data(self, time, x, tau):\n\n self.t_values.append(np.copy(time))\n self.x_values.append(np.copy(x))\n self.tau_values.append(np.copy(tau))",
"def write_data(self, packet):\n time_vector, signal = packet.get_data(self._fs)\n\n if len(time_vector) == 1:\n data = np.array(time_vector + signal)[:, np.newaxis]\n if self._rectime_offset is None:\n self._rectime_offset = time_vector\n else:\n if self._rectime_offset is None:\n self._rectime_offset = time_vector[0]\n data = np.concatenate((np.array(time_vector)[:, np.newaxis].T, np.array(signal)), axis=0)\n data = np.round(data, 4)\n\n if self.file_type == 'edf':\n if data.shape[0] != self._n_chan:\n raise ValueError('Input first dimension must be {}'.format(self._n_chan))\n self._data = np.concatenate((self._data, data), axis=1)\n\n if self._data.shape[1] > self._fs:\n self._file_obj.writeSamples(list(self._data[:, :self._fs]))\n self._data = self._data[:, self._fs:]\n elif self.file_type == 'csv':\n self._csv_obj.writerows(data.T.tolist())\n self._file_obj.flush()",
"def readout():\r\n\r\n #Getting the time of measurement.\r\n thetime = datetime.datetime.now().strftime(\"%H:%M:%S,%f\")[:-5]\r\n\r\n #Additional parameters to read out can be added here.\r\n T_BL100 = bl100.readParameter (142)\r\n MF_BL100 = bl100.readParameter(205)\r\n Rho_BL100 = bl100.readParameter (270)\r\n T_Cori = coriflow.readParameter(142)\r\n MF_Cori = coriflow.readParameter(205)\r\n Rho_Cori = coriflow.readParameter (270) \r\n DP = diffp.readParameter (205)\r\n\r\n #This function has multiple outputs: the sensors' measurements and the time of measurement.\r\n readoutdata = (thetime,T_BL100, MF_BL100, Rho_BL100, T_Cori, MF_Cori, Rho_Cori, DP)\r\n return readoutdata",
"def sample(self):\n print(\"sampling bluetooth arduino\")\n self.sock.send(b'B')\n data = b''\n '''while True:\n data += self.sock.recv(1024)\n if data.endswith(b'\\n'):\n break\n '''\n #self.sock.settimeout(2)\n try:\n while True:\n d = self.sock.recv(255)\n data += d\n if d.find(b'\\n') != -1:\n break\n except Exception as err:\n print(err)\n pass\n print(data)\n data = json.loads(data.decode())\n if not any(x == 0 for x in data.values()):\n for label in self.data_labels:\n self.data[label].append(data[label])\n #self.data[\"time\"].append(time.time() - self.start)\n print(data)\n #print(self.data)\n #return self.data",
"def __log_data_handler(self, event, sender, data):\n pos_x = -data.mvo.pos_x\n pos_y = -data.mvo.pos_y\n pos_z = -data.mvo.pos_z\n # First time we have meaningful values, we store them as reference\n if abs(pos_x) + abs(pos_y) + abs(pos_z) > 0.07:\n if self.ref_pos_x == -1:\n self.ref_pos_x = pos_x\n self.ref_pos_y = pos_y\n self.ref_pos_z = pos_z\n else:\n self.pos_x = pos_x - self.ref_pos_x\n self.pos_y = pos_y - self.ref_pos_y\n self.pos_z = pos_z - self.ref_pos_z\n\n qx = data.imu.q1\n qy = data.imu.q2\n qz = data.imu.q3\n qw = data.imu.q0\n\n degree = 0.01745\n siny = 2 * (qw * qz + qx * qy)\n cosy = 1 - 2 * (qy * qy + qz * qz)\n self.yaw = int(atan2(siny, cosy) / degree)\n\n if self.write_header:\n self.log_file.write(f\"{data.format_cvs_header()}\\n\")\n self.write_header = False\n self.log_file.write(f\"{data.format_cvs()}\\n\")",
"def recive_data(self):\n # read all available data\n while self.ser.inWaiting() > self.INPUT_DATA_SIZE+1:\n data = array.array('c')\n # search the header\n data.append(self.ser.read(1))\n while data[0] != chr(1):\n data[0] = self.ser.read(1)\n \n # wait for all available data\n while self.ser.inWaiting() < (self.INPUT_DATA_SIZE-1):\n time.sleep(0.03);\n \n # recives data\n data = self.ser.read(self.INPUT_DATA_SIZE-1)\n \n # prove if you want graphical data\n if self.pushButton_monitor.isChecked():\n # decodes the data\n t = struct.unpack('I', data[3]+data[2]+data[1]+data[0])\n r = struct.unpack('f', data[4]+data[5]+data[6]+data[7])\n x0 = struct.unpack('f', data[8]+data[9]+data[10]+data[11])\n x1 = struct.unpack('f', data[12]+data[13]+data[14]+data[15])\n u = struct.unpack('f', data[16]+data[17]+data[18]+data[19])\n \n self.time = t[0]*25e-9\n \n # prepare the string output\n aux_str = \" t = \"+str(self.time)+\"\\t\"\n aux_str += \" r = \"+str(r[0])+\"\\t\"\n aux_str += \" u = \"+str(u[0])+\"\\t\"\n aux_str += \" x1 = \"+str(x1[0])+\"\\t\"\n aux_str += \" x0 = \"+str(x0[0])+\"\\n\"\n # print string output\n self.textBrowser.insertPlainText(aux_str)\n \n # append data to the arrays\n self.graf_t.append(self.time)\n self.graf_r.append(r[0])\n self.graf_x0.append(x0[0])\n self.graf_x1.append(x1[0])\n self.graf_u.append(u[0])\n \n # remove one value if the arrays have maximum length\n if self.graf_t.buffer_info()[1] >= NUM_SAMPLES:\n self.graf_t.pop(0)\n self.graf_r.pop(0)\n self.graf_x0.pop(0)\n self.graf_x1.pop(0)\n self.graf_u.pop(0)\n \n # reload number of samples lavel\n self.label_samples_value.setText(str(self.graf_t.buffer_info()[1]))\n # reload number of waiting chars in serial rx buffer\n self.label_rx_buff_value.setText(str(self.ser.inWaiting()))\n\n # reload mutex area\n self.updated_data = 1\n \n # prove if there are available id's\n if (self.actionPC_Monitor.isChecked() and data[20] == chr(2)):\n # if it is true, looks how much id's\n i = struct.unpack('B', data[21])\n\n if i[0] < STACK_SIZE:\n for z in range(i[0]):\n new_device = struct.unpack('B', data[z+22])\n new_string = str(new_device[0])\n \n llista = self.listWidget_link.findItems(new_string, QtCore.Qt.MatchExactly)\n if len(llista) == 0:\n self.listWidget_link.addItem(new_string)",
"def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)",
"def take_data(num_points, sampling_rate):\n\n\tanalog_input = Task()\n\tread = int32()\n\tdata = numpy.zeros((num_points,), dtype=numpy.float64)\n\n\t# DAQmx Configure Code\n\tanalog_input.CreateAIVoltageChan(\"Dev1/ai0\", \"\", DAQmx_Val_Cfg_Default, -10.0, 10.0, DAQmx_Val_Volts, None)\n\tanalog_input.CfgSampClkTiming(\"\", sampling_rate, DAQmx_Val_Rising, DAQmx_Val_FiniteSamps,num_points)\n\n\t# DAQmx Start Code\n\tanalog_input.StartTask()\n\n\t# DAQmx Read Code\n\tanalog_input.ReadAnalogF64(num_points, 10.0, DAQmx_Val_GroupByChannel, data, num_points, byref(read),None)\n\n\treturn data",
"def record(self):\n\n # TODO: Make the Metadata transmission automatic\n n_channels = 32\n sampling_rate = 500\n channel_types = 'eeg'\n\n # Info class required by mne\n info = mne.create_info(ch_names=n_channels, sfreq=sampling_rate, ch_types=channel_types)\n\n # TODO: Dynamically reduce array size\n\n while self.flag_event.is_set():\n sample, timestamp = self.inlet.pull_sample()\n self.timeObj.append(timestamp)\n self.sampleObj.append(sample)\n self.data = np.array(self.sampleObj).reshape((n_channels, -1)) * 1e-6\n if (self.data.shape[1]+1) % sampling_rate == 0:\n custom_raw = mne.io.RawArray(self.data, info)\n custom_raw.save(\"./Data/sample_raw.fif\", overwrite=True)\n\n # TODO: Finish real time data plotting\n # print(self.data.shape)\n # if (self.data.shape[1]+1) % sampling_rate == 0:\n # # custom_raw = mne.io.RawArray(self.data, info)\n # # custom_raw.plot()\n # # plt.plot(self.timeObj, data.T * 1e-6)\n # # plt.pause(0.05)\n # # plt.show()\n # ani = animation.FuncAnimation(fig, self.animate, interval=10)\n # plt.pause(0.05)\n # plt.show()",
"def data_log(self, file, spectra):\n if self.datalogflag:\n with open(file, 'a') as f:\n f.write('{0}, '.format(spectra))\n self.vprint(\n 2, 'Writing spectra to data log at {}'.format(file))",
"def Writedata(self, tstep):\n \n nc = Dataset(self.outfile, 'a')\n \n nc.variables['time'][tstep] = self.time\n nc.variables['salt'][tstep] = self.salt\n nc.variables['temp'][tstep] = self.temp\n nc.variables['uc'][tstep] = self.uc\n nc.variables['vc'][tstep] = self.vc\n nc.variables['nu_v'][tstep] = self.nu_v\n nc.variables['rho'][tstep] = self.rho\n nc.variables['tau_x'][tstep] = self.tau_x\n nc.variables['tau_y'][tstep] = self.tau_y\n nc.variables['eta'][tstep] = self.eta\n \n nc.close()",
"def main():\r\n N = 200 # number of samples\r\n port_name = 'COM4' # serial port name\r\n port_speed = 19200 # serial port speed/ baudrate (bits per second)\r\n \r\n t,percent = get_data(N,port_name,port_speed) # get data\r\n file_write(t,percent) # write data to file\r",
"def addSample(self, time, x, y, z):\n\t\tself.numSamples += 1\n\t\tif self.prevTime != None:\n\t\t\tdt = abs(time - self.prevTime)\n\t\t\tself.timeDifferences[dt] += 1\n\t\t\tif dt > self.highDT:\n\t\t\t\tif getTimeDifference(self.rawData[self.currIdx]) >= self.minSampleTime:\n\t\t\t\t\tself.currIdx += 1\n\t\t\t\t\tself.rawData.append(list())\n\t\t\t\telse:\n\t\t\t\t\tself.rawData[self.currIdx] = list()\n\t\t\t\t\n\t\t\telse: \n\t\t\t\tself.rawData[self.currIdx].append(preProcess.resultantAcceleration(time, x, y, z))\n\n\t\tself.prevTime = time",
"def savedata(outfile):\n\n global BTRACK, GSTRUC, NPIX\n \n print('SAVING DATA to '+outfile)\n\n # Back up any existing file\n picklefile = outfile.replace('.fits','.pkl')\n backpicklefile = picklefile+'.backup' \n if os.path.exists(picklefile):\n if os.path.exists(backpicklefile):\n shutil.move(picklefile,backpicklefile)\n \n # Write tracking structures to pickle file\n with open(picklefile, 'wb') as f:\n pickle.dump(BTRACK, f)\n pickle.dump(GSTRUC, f) \n\n # Remove backup file if it exists\n if os.path.exists(backpicklefile):\n os.remove(backpicklefile)\n \n # Construct gstruc output structure\n count = GSTRUC['count']\n ngauss = GSTRUC['ngauss']\n dtype = np.dtype([('x',int),('y',int),('par',float,3),('sigpar',float,3),('rms',float),\n ('noise',float),('lon',float),('lat',float)])\n gstruc = np.zeros(ngauss,dtype=dtype)\n cnt = 0\n for i in range(count):\n tstr1 = GSTRUC['data'][i]\n ngauss1 = len(tstr1['par'])//3\n gstruc1 = np.zeros(ngauss1,dtype=dtype)\n gstruc1['x'] = tstr1['x']\n gstruc1['y'] = tstr1['y']\n gstruc1['lon'] = tstr1['lon']\n gstruc1['lat'] = tstr1['lat'] \n gstruc1['rms'] = tstr1['rms']\n gstruc1['noise'] = tstr1['noise']\n gstruc1['par'] = tstr1['par'].reshape(ngauss1,3)\n gstruc1['sigpar'] = tstr1['sigpar'].reshape(ngauss1,3)\n gstruc[cnt:cnt+ngauss1] = gstruc1\n cnt += ngauss1\n gstruc = Table(gstruc)\n gstruc.write(outfile,overwrite=True)\n print(str(len(gstruc))+' gaussians')\n \n return gstruc"
] | [
"0.6557734",
"0.5812748",
"0.5615559",
"0.55976075",
"0.5516802",
"0.54913497",
"0.54551655",
"0.5406054",
"0.5381206",
"0.53806",
"0.53675306",
"0.5364671",
"0.5356917",
"0.53516746",
"0.5319687",
"0.5301007",
"0.5295816",
"0.525568",
"0.52149343",
"0.5212219",
"0.5188131",
"0.5184165",
"0.51569605",
"0.51560104",
"0.5148536",
"0.51351726",
"0.5131377",
"0.5128726",
"0.5124959",
"0.51184344"
] | 0.7342835 | 0 |
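A minimal usage sketch for the collect_samples routine above, assuming the pyserial package is installed; the port name, baud rate, sensor count and sample count below are illustrative only and do not come from the original record.

import serial  # pyserial, assumed available

NO_SENSORS = 4    # illustrative: four 3-axis accelerometers on the bus
NO_SAMPLES = 200  # illustrative: data lines to read before the final elapsed-time line

log = []          # collect_samples appends one row per sample line here

port = serial.Serial('COM4', baudrate=115200, timeout=1)  # hypothetical port settings
try:
    collect_samples(port, NO_SENSORS, NO_SAMPLES, log)
finally:
    port.close()

print('Captured ' + str(len(log)) + ' rows')

Because collect_samples blocks until NO_SAMPLES data lines plus a closing elapsed-time line have arrived, the port is only closed once the function returns.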
A function that takes a numpy array, given by the np_samples(numpy array) parameter, of samples collected from 3-Axis Accelerometers in the form [ID,X,Y,Z,Time] and returns a numpy array of samples in which every ID value has the same number of samples. Each unique ID relates to an associated sensor, and the total number of sensors should be given with the parameter NO_SENSORS(int). The function operates by finding the sensor with the fewest samples and removing the most recently obtained samples from any other sensor that exceeds this amount. This makes further data manipulation and sorting possible. A worked example follows this record. | def equalise_sample_numbers(np_samples,NO_SENSORS):
    length = [] # Holds the number of samples associated with each ID
# Finds the number of samples for each sensor by checking ID and saves the amount in list
for i in range(1,NO_SENSORS+1):
length.append((np_samples[:,0] == i).sum())
# Find the ID with the least samples and then calculate how many more samples each ID has
# than the minimum, saving the difference in np_difference.
np_difference = np.array(length) - min(length)
# Removes the final values obtained for each sensor so that each sensor has the same
# number of samples.
for i in range(0,NO_SENSORS):
if (np_difference[i] != 0):
            equal = 0
            j = -1
while(equal < np_difference[i]):
if (np_samples[j][0] == (i + 1)):
np_samples = np.delete(np_samples,j,0)
equal += 1
j -= 1
return (np_samples) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _avg_sample(self):\n samples = [0] * self.num_samples\n for i in range(self.num_samples):\n samples[i] = self.sensor.measure_distance()\n time.sleep(self.sample_delay)\n if self.drop_extremes:\n samples.sort()\n samples = samples[1:-1]\n return sum(samples) / len(samples)",
"def get_average_measurements_for_area(area_id):\n locations = db_access.get_locations_for_area(area_id)\n\n if len(locations) == 0:\n return None\n else:\n return_table = []\n for i in locations:\n for k in db_access.get_measurements_for_location(i[0]):\n return_table.append(k[1])\n return mean(return_table)",
"def split_area(a, pieces=4, step_size=10, tol=1.0, x_axis=True):\r\n xs = a[:, 0]\r\n uni = np.unique(xs)\r\n ax_min, ax_max = uni[0], uni[-1]\r\n steps, incr = np.linspace(ax_min, ax_max, num=pieces+1,\r\n endpoint=True, retstep=True)\r\n # ---- Do the work, clip the axes, get the unique points, calculate area\r\n arrs = []\r\n areas = []\r\n for i in range(1, len(steps)):\r\n sub = _clip_ax(a, True, steps[i-1], steps[i]) # ---- clip_ax\r\n sub = _uni_pnts(sub) # ---- uni_pnts\r\n areas.append(e_area(sub)) # ---- e_area\r\n arrs.append(sub)\r\n tot_area = sum(areas)\r\n cum_area = np.cumsum(areas)\r\n area = tot_area/float(pieces) # required area\r\n bins = np.arange(1, pieces+1) * area\r\n inds = np.digitize(cum_area, bins)\r\n f = np.where(inds[:-1]-inds[1:] != 0)[0]\r\n t = f + 2\r\n# L, B, R, T = _extent(a)\r\n# keep = []\r\n# tot_area = e_area(a)\r\n# area = tot_area/float(pieces) # required area\r\n# cal = 0.0\r\n# tol = area*tol/100.\r\n# check = np.abs(area-cal)\r\n# if x_axis:\r\n# n = 0\r\n# step=step_size\r\n# right = L + step\r\n# while (check > tol) and (n < 20):\r\n# k = clip_ax(a, x_axis=True, ax_min=L, ax_max=right)\r\n# cal = e_area(k)\r\n# #print(n, cal)\r\n# check = area-cal\r\n# print(\"area {} check {} right {}\".format(cal, check < tol, step))\r\n# if check > 0.:\r\n# right += step_size\r\n# else:\r\n# step_size /= 2.\r\n# right -= step_size\r\n# k = clip_ax(a, x_axis=True, ax_min=L, ax_max=right)\r\n# cal = e_area(k)\r\n# check = np.abs(area-cal)\r\n# print(\"....area-cal {} \".format(cal))\r\n## step += step_size\r\n# n += 1\r\n\r\n# else:\r\n# fac = np.linspace(B, T, num=divisor+1, endpoint=True)\r\n# f = fac[:-1]\r\n# t = fac[1:]\r\n# for i in range(divisor):\r\n# k = clip_ax(a, x_axis=False, ax_min=f[i], ax_max=t[i])\r\n# _, idx = np.unique(k, return_index=True, axis=0)\r\n# k = np.concatenate((k[np.sort(idx)], [k[0]]))\r\n# keep.append(k)\r\n return arrs, areas, f, t",
"def collect_samples(serialPort,NO_SENSORS,NO_SAMPLES,log):\n run = '1'\n badSamples = 0\n count = 1\n log_temp = []\n temp = [0] * 20\n NO_FIELDS = (NO_SENSORS * 3) + 1\n \n while (run == '1'):\n # If the input buffer is not empty read the data out into rawData using \\n as a delimiter.\n if (serialPort.inWaiting()>0):\n rawData = serialPort.readline()\n print(rawData)\n \n # If invalid data is recieved this prevents program crash\n try:\n # Decode the bytes into a string\n data = rawData.decode()\n \n # Split x, y, z and newline values into a list\n if (count >= (NO_SAMPLES + 1)):\n endTime_temp = data.split(\" \", 2)\n if (len(endTime_temp) == 2 and '' not in endTime_temp):\n endTime = int(endTime_temp[0])\n else:\n endTime = 780\n print('Time not recieved')\n print('Lost Samples: ' + str(badSamples))\n run = '0'\n else:\n data_readings = data.split(\" \", NO_FIELDS)\n print(data_readings)\n \n # A correct sample should contain 16 values and not include null and so this is used\n # to validate the data and record any samples that are discarded in this way\n if (len(data_readings) == NO_FIELDS and '' not in data_readings):\n # Discard newline characters before saving data\n int_data_readings = list(map(int,data_readings[:(NO_FIELDS - 1)]))\n log_temp.append(int_data_readings)\n else:\n badSamples += 1\n except:\n print('Invalid data recieved')\n \n count += 1\n\n samplingPeriod = (endTime/NO_SAMPLES)/NO_SENSORS\n timeStamp = 0.0\n\n for i in range(0,len(log_temp)):\n for j in range(0,NO_SENSORS):\n temp[0+(j*4)] = log_temp[i][0+(j*3)]\n temp[1+(j*4)] = log_temp[i][1+(j*3)]\n temp[2+(j*4)] = log_temp[i][2+(j*3)]\n temp[3+(j*4)] = timeStamp\n timeStamp += samplingPeriod\n log.append(temp.copy())",
"def carla_lidar_measurement_to_ndarray(\n lidar_measurement: carla.LidarMeasurement, # pylint: disable=no-member\n pixels_per_meter: int = 2,\n hist_max_per_pixel: int = 5,\n meters_max: int = 50,\n) -> np.ndarray:\n\n def splat_points(\n point_cloud,\n pixels_per_meter: int,\n hist_max_per_pixel: int,\n meters_max: int,\n ):\n \"\"\"Converts point cloud to 2D histograms.\"\"\"\n # Allocate 2D histogram bins.\n ymeters_max = meters_max\n xbins = np.linspace(\n -meters_max,\n meters_max + 1,\n meters_max * 2 * pixels_per_meter + 1,\n )\n ybins = np.linspace(\n -meters_max,\n ymeters_max + 1,\n ymeters_max * 2 * pixels_per_meter + 1,\n )\n # Compute histogram of x and y coordinates of points.\n hist = np.histogramdd(point_cloud[..., :2], bins=(xbins, ybins))[0]\n # Clip histogram\n hist[hist > hist_max_per_pixel] = hist_max_per_pixel\n # Normalize histogram by the maximum number of points in a bin we care about.\n overhead_splat = hist / hist_max_per_pixel\n # Return splat in X x Y orientation, with X parallel to car axis, Y perp, both parallel to ground.\n return overhead_splat\n\n # Serialise and parse to `NumPy` tensor.\n points = np.frombuffer(lidar_measurement.raw_data, dtype=np.dtype(\"f4\"))\n points = np.reshape(points, (int(points.shape[0] / 3), 3))\n\n # Split observations in the Z dimension (height).\n below = points[points[..., 2] <= -2.5]\n above = points[points[..., 2] >= -2.5]\n # Convert point clouds to 2D histograms.\n features = list()\n features.append(\n splat_points(\n below,\n pixels_per_meter,\n hist_max_per_pixel,\n meters_max,\n ))\n features.append(\n splat_points(\n above,\n pixels_per_meter,\n hist_max_per_pixel,\n meters_max,\n ))\n features = np.stack(features, axis=-1)\n\n return features.astype(np.float32)",
"def sample_data(self, samples, max_cor):\n r = lambda: np.random.randint(1, max_cor)\n self.data = [[r(), r()] for _ in range(samples)]",
"def find_center(file):\n\n data = pyfits.getdata(file)\n chipx = data.field('X')\n chipy = data.field('Y')\n#\n#--- because the array is too large to handle in one swipe, divide it into 8x8 segments\n#\n xmin = min(chipx)\n ymin = min(chipy)\n xmax = max(chipx)\n ymax = max(chipy)\n xstep = int((xmax-xmin) / 8 )\n ystep = int((ymax-ymin) / 8 )\n#\n#--- find the interval which contains largest samples \n#\n cposx = 0\n cposy = 0\n cmax = 0\n for i in range (0, 8):\n xstart = xstep * i + xmin\n xstop = xstart + xstep\n for j in range (0, 8):\n ystart = ystep * j + ymin\n ystop = ystart + ystep\n\n mask = (data.field('X') >= xstart) & (data.field('X') < xstop) & (data.field('Y') >= ystart) & (data.field('Y') < ystop)\n temp = data[mask]\n chipx_p = temp.field('X')\n chipy_p = temp.field('Y')\n if len(chipx_p) > cmax:\n cmax = len(chipx_p)\n cposx = i\n cposy = j\n#\n#--- extract the area of the highest count\n#\n xpos_list = []\n ypos_list = []\n maxv_list = []\n xstart = xstep * cposx + xmin\n xstop = xstart + xstep\n\n ystart = ystep * cposy + ymin\n ystop = ystart + ystep\n\n mask = (data.field('X') >= xstart) & (data.field('X') < xstop) & (data.field('Y') >= ystart) & (data.field('Y') < ystop)\n temp = data[mask]\n chipx_p = temp.field('X')\n chipy_p = temp.field('Y')\n#\n#--- count up the events. bin to 2x2 so that we get enough count in each bin\n#\n xmin = min(chipx_p)\n xmax = max(chipx_p)\n xdim = int(0.5 * (xmax - xmin)) + 1\n ymin = min(chipy_p)\n ymax = max(chipy_p)\n ydim = int(0.5 * (ymax - ymin)) + 1\n\n cbin = [[0 for y in range(0, ydim)] for x in range(0, xdim)]\n for j in range(0, len(chipy_p)):\n xpos = int(0.5 * (chipx_p[j]-xmin))\n ypos = int(0.5 * (chipy_p[j]-ymin))\n cbin[xpos][ypos] += 1\n#\n#--- now find max position\n#\n vmax = 0\n xx = 0\n yy = 0\n for m in range(0, xdim):\n for n in range(0, ydim):\n if cbin[m][n] > vmax:\n vmax = cbin[m][n]\n xx = m\n yy = n\n#\n#--- take the mddle of the bin as the brightest spot\n#\n xv = int(xx * 2.0 + 1.0 + xmin)\n yv = int(yy * 2.0 + 1.0 + ymin)\n\n return [xv, yv]",
"def remove_annual_mean(data,data_obs,lats,lons,lats_obs,lons_obs):\n \n ### Import modulates\n import numpy as np\n import calc_Utilities as UT\n \n ### Create 2d grid\n lons2,lats2 = np.meshgrid(lons,lats)\n lons2_obs,lats2_obs = np.meshgrid(lons_obs,lats_obs)\n \n ### Calculate weighted average and remove mean\n data = data - UT.calc_weightedAve(data,lats2)[:,:,np.newaxis,np.newaxis]\n data_obs = data_obs - UT.calc_weightedAve(data_obs,lats2_obs)[:,np.newaxis,np.newaxis]\n \n return data,data_obs",
"def _sample_from_null_frm_dist(mean_spike_count, total_baseline_time, total_effect_time, sample_size=10 ** 6):\n total_time = total_baseline_time + total_effect_time\n\n samples = (\n st.poisson(mean_spike_count * total_effect_time / total_time).rvs(sample_size) / total_effect_time\n -\n st.poisson(mean_spike_count * total_baseline_time / total_time).rvs(sample_size) / total_baseline_time\n )\n\n # convert 1/ms to 1/s (Hz)\n samples = samples / MS_TO_S\n\n return samples",
"def sampleArray(sizeOfReducedSample = DEFSIZEOFREDUCEDSAMPLE): \n return np.zeros((sizeOfReducedSample, sizeOfReducedSample))",
"def cut_sample(whole_audio_data, num_samples):\n len_audio_data = len(whole_audio_data)\n if num_samples >= len_audio_data:\n raise Exception(\"Length of to be generated signal cannot be greater and equal to original audio signal\")\n sys.exit(-1)\n\n # generate a random number which is used as a first index to cut off\n ind = random.randint(0, len_audio_data-num_samples)\n gen_data = whole_audio_data[ind:ind+num_samples]\n return gen_data",
"def tolerant_mean(arrs):\n lens = [len(i) for i in arrs]\n arr = np.ma.empty((np.max(lens),len(arrs)))\n arr.mask = True\n for idx, l in enumerate(arrs):\n arr[:len(l),idx] = l.flatten()\n return arr",
"def nino34(ds: xr.Dataset, file: str, var: str, threshold: float=False):\n ds = select_var(ds, var)\n ref = xr.open_dataset(file)\n ref = select_var(ref, var)\n n34 = [-5, 5, 120, 170]\n ds_n34 = subset_spatial(ds, n34[0], n34[1], n34[2], n34[3])\n ref_n34 = subset_spatial(ref, n34[0], n34[1], n34[2], n34[3])\n n34_anom = ds_n34 - ref_n34\n n34_ts = n34_anom.mean(dim=['lat', 'lon'])\n windows = {'time':5}\n i = True\n for item in n34_ts[var].rolling(**windows):\n # After the mean I probably have to give 'time' dimension\n # back to the item, so that the rolling means can then be concatenated\n # into a timeseries.\n if i:\n retset = item[0].mean()\n i = False\n print('====')\n print(item[0])\n print(item[1])\n print('====')\n retset = xr.concat([retset, item[0].mean()])\n print(retset)\n #n34_running_mean = pd.rolling_mean(n34_ts[var].values, 5)\n #return xr.Dataset(n34_running_mean)",
"def find_zeros(self, x_axis, array, inicio_index):\r\n delta_ppm = 0.2\r\n step = np.abs(x_axis[1]-x_axis[0])\r\n delta_n = int(delta_ppm/2/step)\r\n \r\n mean_x = []\r\n mean_array = []\r\n std_array = []\r\n \r\n index = inicio_index + 4*delta_n\r\n intervalo = array[index-delta_n : index+delta_n]\r\n new = np.mean(intervalo)\r\n condition = True\r\n while condition: \r\n old = new\r\n sgn_old = np.sign(old)\r\n std = np.std(intervalo)\r\n # guardo los valores\r\n mean_x.append(x_axis[index])\r\n mean_array.append(old)\r\n std_array.append(std)\r\n # doy un paso\r\n index = int(index + 2*delta_n)\r\n if index + 2*delta_n < array.size:\r\n intervalo = array[index-delta_n : index+delta_n] \r\n else:\r\n break\r\n new = np.mean(intervalo)\r\n sgn_new = np.sign(new) \r\n cond1 = sgn_new==sgn_old\r\n # cond2: es falsa cuando la desviacion es chica y esta cerca de cero)\r\n cond2 = not( std<100 and abs(new)<0.1*abs(array).max() )\r\n condition = cond1 and cond2\r\n \r\n index_fin = index - delta_n\r\n \r\n index = inicio_index - 4*delta_n\r\n intervalo = array[index-delta_n : index+delta_n]\r\n new = np.mean(intervalo)\r\n condition = True\r\n while condition:\r\n old = new\r\n sgn_old = np.sign(old)\r\n std = np.std(intervalo)\r\n # guardo los valores\r\n mean_x.append(x_axis[index])\r\n mean_array.append(old)\r\n std_array.append(std)\r\n # doy un paso\r\n index = int(index - 2*delta_n)\r\n if index - 2*delta_n > 0:\r\n intervalo = array[index-delta_n : index+delta_n] \r\n else:\r\n break\r\n new = np.mean(intervalo)\r\n sgn_new = np.sign(new)\r\n cond1 = sgn_new==sgn_old\r\n cond2 = not( std<100 and abs(new)<0.1*abs(array).max() )\r\n condition = cond1 and cond2\r\n \r\n index_ini = index + delta_n\r\n \r\n \r\n \r\n # grafico para testear:\r\n plt.figure(432)\r\n plt.plot(x_axis,array*0, 'k--')\r\n plt.plot(x_axis,array)\r\n plt.plot(mean_x, mean_array, 'o')\r\n ax = plt.gca()\r\n ax.invert_xaxis()\r\n \r\n plt.figure(431)\r\n plt.plot(mean_x, np.zeros_like(mean_x))\r\n plt.plot(mean_x, std_array, 'o')\r\n ax = plt.gca()\r\n ax.invert_xaxis()\r\n \r\n \r\n return index_ini, index_fin",
"def filt1(X, yvals, xvals, ny, nx):\n \n ylen = X.shape[0]\n xlen = X.shape[1]\n\n yflen = (ylen-1)//ny\n xflen = (xlen-1)//nx\n\n Y = np.zeros((X.shape))\n\n #Y = Y[0:yflen,0:xflen,]\n\n ymax = ny*yflen+1\n xmax = nx*xflen+1\n\n Y = Y[0:ymax,0:xmax,]\n Xnew = X[0:ymax,0:xmax,]\n yvals = yvals[0:ymax,0:xmax,]\n xvals = xvals[0:ymax,0:xmax,] \n\n counter = np.zeros((Y.shape))\n \n for i in range(xflen):\n xmin = nx*i\n xmax = nx*(i+1)+1\n for j in range(yflen):\n ymin = ny*j\n ymax = ny*(j + 1)+1\n #print((xmin,xmax), (ymin,ymax))\n Y[ymin:ymax,xmin:xmax,] = Y[ymin:ymax,xmin:xmax,] + np.mean(X[ymin:ymax,xmin:xmax,], axis=(0,1))\n counter[ymin:ymax,xmin:xmax,] = counter[ymin:ymax,xmin:xmax,] + 1\n\n Y = Y/counter #We take the average of the points that appear more than once\n\n return Xnew, Y, yvals, xvals",
"def removeIncompleteSamples(data):\n print(\"Removing incomplete samples...\")\n\n M = len(data)\n N = data[0].shape[0]\n samples_to_remove = []\n for n in range(N):\n for m in range(M):\n if pd.isnull(data[m].iloc[n][0]):\n samples_to_remove.append(n)\n break\n\n if len(samples_to_remove) > 0:\n print(\"A total of \" + str(len(samples_to_remove)) + \" sample(s) have at least a missing view and will be removed\")\n\n data_filt = [None]*M\n samples_to_keep = np.setdiff1d(range(N),samples_to_remove)\n for m in range(M):\n data_filt[m] = data[m].iloc[samples_to_keep]\n\n return data_filt",
"def remove_outliers(tx, mean_x, std_x):\n n_tx = tx.copy()\n for sample in range(tx.shape[0]):\n for dim in range(tx.shape[1]):\n if (n_tx[sample, dim] > mean_x[dim] + 2 * std_x[dim]):\n n_tx[sample, dim] = mean_x[dim]\n if (n_tx[sample, dim] < mean_x[dim] - 2 * std_x[dim]):\n n_tx[sample, dim] = mean_x[dim]\n if (n_tx[sample, dim] == -999):\n n_tx[sample, dim] = 0\n return n_tx",
"def get_mean_in_time(trajectories, nb_bins=15, freq_range=[0.4, 0.6]):\n # Create bins and select trajectories going through the freq_range\n time_bins = np.linspace(-950, 2000, nb_bins)\n trajectories = [traj for traj in trajectories if np.sum(np.logical_and(\n traj.frequencies >= freq_range[0], traj.frequencies < freq_range[1]), dtype=bool)]\n\n # Offset trajectories to set t=0 at the point they are seen in the freq_range and adds all the frequencies / times\n # to arrays for later computation of mean\n t_traj = np.array([])\n f_traj = np.array([])\n for traj in trajectories:\n idx = np.where(np.logical_and(traj.frequencies >=\n freq_range[0], traj.frequencies < freq_range[1]))[0][0]\n traj.t = traj.t - traj.t[idx]\n t_traj = np.concatenate((t_traj, traj.t))\n f_traj = np.concatenate((f_traj, traj.frequencies))\n\n # Binning of all the data in the time bins\n filtered_fixed = [traj for traj in trajectories if traj.fixation == \"fixed\"]\n filtered_lost = [traj for traj in trajectories if traj.fixation == \"lost\"]\n freqs, fixed, lost = [], [], []\n for ii in range(len(time_bins) - 1):\n freqs = freqs + [f_traj[np.logical_and(t_traj >= time_bins[ii], t_traj < time_bins[ii + 1])]]\n fixed = fixed + [len([traj for traj in filtered_fixed if traj.t[-1] < time_bins[ii]])]\n lost = lost + [len([traj for traj in filtered_lost if traj.t[-1] < time_bins[ii]])]\n\n # Computation of the mean in each bin, active trajectories contribute their current frequency,\n # fixed contribute1 and lost contribute 0\n mean = []\n for ii in range(len(freqs)):\n mean = mean + [np.sum(freqs[ii]) + fixed[ii]]\n mean[-1] /= (len(freqs[ii]) + fixed[ii] + lost[ii])\n\n nb_active = [len(freq) for freq in freqs]\n nb_dead = [fixed[ii] + lost[ii] for ii in range(len(fixed))]\n\n return 0.5 * (time_bins[1:] + time_bins[:-1]), mean, nb_active, nb_dead",
"def arma_generate_sample(\n ar, ma, nsample, scale=1, distrvs=None, axis=0, burnin=0\n):\n distrvs = np.random.standard_normal if distrvs is None else distrvs\n if np.ndim(nsample) == 0:\n nsample = [nsample]\n if burnin:\n # handle burin time for nd arrays\n # maybe there is a better trick in scipy.fft code\n newsize = list(nsample)\n newsize[axis] += burnin\n newsize = tuple(newsize)\n fslice = [slice(None)] * len(newsize)\n fslice[axis] = slice(burnin, None, None)\n fslice = tuple(fslice)\n else:\n newsize = tuple(nsample)\n fslice = tuple([slice(None)] * np.ndim(newsize))\n eta = scale * distrvs(size=newsize)\n return signal.lfilter(ma, ar, eta, axis=axis)[fslice]",
"def test_remove_autos_with_pols():\n test_array = np.ones((4, 3, 3, 11, 21))\n out_array = utils.remove_auto_correlations(test_array, axes=(1, 2))\n assert (4, 6, 11, 21) == out_array.shape",
"def __cut_arrays(data_array, maximum_time, arrays_to_cut):\n\n try:\n begin_time = data_array[arrays_to_cut[0]][0][0]\n end_time = data_array[arrays_to_cut[0]][0][-1]\n delta_time = (\n data_array[arrays_to_cut[0]][0][1]\n - data_array[arrays_to_cut[0]][0][0]\n )\n total_time = end_time - begin_time\n if total_time > maximum_time:\n over_time = total_time - maximum_time\n array_elm_to_drop = int(over_time / delta_time)\n for arrays in arrays_to_cut:\n data_array[arrays][0] = data_array[arrays][0][\n array_elm_to_drop:\n ]\n data_array[arrays][1] = data_array[arrays][1][\n array_elm_to_drop:\n ]\n except:\n pass",
"def ice_area(data, mesh, hemisphere=\"N\", attrs={}):\n\n if len(data.shape) == 1:\n data = add_timedim(data)\n\n if hemisphere == \"N\":\n varname = \"siarean\"\n hemis_mask = mesh.y2 > 0\n else:\n varname = \"siareas\"\n hemis_mask = mesh.y2 < 0\n\n if isinstance(data, xr.DataArray):\n vol = (data[:, hemis_mask] * mesh.lump2[hemis_mask]).sum(axis=1)\n da = xr.DataArray(\n vol, dims=[\"time\"], coords={\"time\": data.time}, name=varname, attrs=attrs\n )\n return da\n else:\n area = (data[:, hemis_mask] * mesh.lump2[hemis_mask]).sum(axis=1)\n return area",
"def area(boxes):\n y_min, x_min, y_max, x_max = np.split(boxes, 4, axis=-1)\n return np.squeeze((y_max - y_min) * (x_max - x_min), [1])",
"def check_data(raw_data, nsamples, verbose):\n if verbose:\n print 'raw_data', raw_data\n print 'raw_data.shape', raw_data.shape\n\n uc_timings, uc_run_counts = np.unique(raw_data[:,0], return_counts=True)\n if verbose:\n print 'uc_timings', uc_timings\n print 'uc_run_counts', uc_run_counts\n\n n_of_runs = set(uc_run_counts)\n if len(n_of_runs) != 1 and len(n_of_runs) != 2:\n raise AssertionError(\n 'Something bad happened!\\nn_of_runs = {0}\\nlen(n_of_runs) = '\n '{1}'.format(n_of_runs, len(n_of_runs))\n )\n\n if len(n_of_runs) == 2:\n if np.diff(list(n_of_runs))[0] != 1:\n raise AssertionError(\n 'Something bad happened!\\nn_of_runs = '\n '{0}\\nnp.diff(list(n_of_runs))[0] = '\n '{1}'.format(n_of_runs, np.diff(list(n_of_runs))[0])\n )\n n_incomplete_pulse = np.sum(uc_run_counts == np.max(list(n_of_runs)))\n if verbose:\n print 'n_incomplete_pulse', n_incomplete_pulse\n clean_data = raw_data[:-n_incomplete_pulse]\n else:\n clean_data = raw_data\n\n if verbose:\n print 'clean_data', clean_data\n print 'clean_data.shape', clean_data.shape\n\n return clean_data",
"def data_filter(input_array, step):\n mod = input_array.shape[0] % step\n rows = input_array.shape[0] // step\n factor = np.arange(rows)\n if mod:\n in_mat = np.reshape(input_array[:-mod], (rows, -1))\n min_array = np.r_[in_mat.min(axis=1), min(input_array[-mod:])]\n max_array = np.r_[in_mat.max(axis=1), max(input_array[-mod:])]\n median = np.median(in_mat, axis=1)\n median_rest = np.median(input_array[-mod:])\n median_array = np.r_[median, median_rest]\n\n # get min, max and average value indices\n min_ind = in_mat.argmin(axis=1)\n min_ind += factor * step\n min_ind = np.append(min_ind, input_array[-mod:].argmin() + rows * step)\n\n max_ind = in_mat.argmax(axis=1)\n max_ind += factor * step\n max_ind = np.append(max_ind, input_array[-mod:].argmax() + rows * step)\n\n median_trans = np.reshape(median, (rows, -1))\n median_ind = abs(in_mat - median_trans).argmin(axis=1)\n median_ind += factor * step\n median_ind = np.append(median_ind, abs(\n input_array[-mod:] - median_rest).argmin() + rows * step)\n\n else:\n in_mat = np.reshape(input_array, (input_array.shape[0] // step, -1))\n min_array = in_mat.min(axis=1)\n max_array = in_mat.max(axis=1)\n median_array = np.median(in_mat, axis=1)\n\n # get min, max and average value indices\n min_ind = in_mat.argmin(axis=1)\n min_ind += factor * step\n\n max_ind = in_mat.argmax(axis=1)\n max_ind += factor * step\n\n median_trans = np.reshape(median_array, (rows, -1))\n median_ind = abs(in_mat - median_trans).argmin(axis=1)\n median_ind += factor * step\n\n return min_array, median_array, max_array, min_ind, median_ind, max_ind",
"def calcASA(atoms, probe=1.4, n_sphere_point=960):\r\n atoms.setRadii(getAtomRadii(atoms))\r\n\r\n sphere_points = generate_sphere_points(n_sphere_point)\r\n const = 4.0 * math.pi / len(sphere_points)\r\n\r\n test_point = [0.0, 0.0, 0.0]\r\n areas = []\r\n\r\n coords_all = atoms.getCoords()\r\n\r\n for i, atom_i in enumerate(atoms):\r\n\r\n neighbor_indices = find_neighbor_indices(atoms, probe, i)\r\n n_neighbor = len(neighbor_indices)\r\n j_closest_neighbor = 0\r\n radius = probe + atom_i.getRadius()\r\n\r\n n_accessible_point = 0\r\n for point in sphere_points:\r\n is_accessible = True\r\n\t test_point = np.dot(point,radius) + coords_all[i]\r\n cycled_indices = range(j_closest_neighbor, n_neighbor)\r\n cycled_indices.extend(range(j_closest_neighbor))\r\n\r\n for j in cycled_indices:\r\n atom_j = atoms[neighbor_indices[j]]\r\n r = atom_j.getRadius() + probe\r\n #diff_sq = np.linalg.norm(coords_all[neighbor_indices[j]] - test_point)\r\n diff_sq = pos_distance_sq(coords_all[neighbor_indices[j]], test_point)\r\n\t\tif diff_sq < r*r:\r\n j_closest_neighbor = j\r\n is_accessible = False\r\n break\r\n\t if is_accessible:\r\n n_accessible_point += 1\r\n\r\n area = const*n_accessible_point*radius*radius\r\n areas.append(area)\r\n\t#print str(atom_i.getResnum()) + \" \" + atom_i.getResname() + \" \" + str(area)\r\n return areas",
"def create_test_sino_3d(A=9, Nx=22, Ny=22, max_phase=5.0,\n ampl_range=(1.0, 1.0)):\n # initiate array\n resar = np.zeros((A, Ny, Nx), dtype=np.complex128)\n # 2pi coverage\n angles = np.linspace(0, 2*np.pi, A, endpoint=False)\n # x-values of Gaussian\n x = np.linspace(-Nx/2, Nx/2, Nx, endpoint=True).reshape(1, -1)\n y = np.linspace(-Ny/2, Ny/2, Ny, endpoint=True).reshape(-1, 1)\n # SD of Gaussian\n dev = min(np.sqrt(Nx/2), np.sqrt(Ny/2))\n # Off-centered rotation about second axis:\n off = Nx/7\n for ii in range(A):\n # Gaussian distribution sinogram\n x0 = np.cos(angles[ii])*off\n phase = np.exp(-(x-x0)**2/dev**2) * np.exp(-(y)**2/dev**2)\n phase = normalize(phase, vmax=max_phase)\n if ampl_range[0] == ampl_range[1]:\n # constant amplitude\n ampl = ampl_range[0]\n else:\n # ring\n ampldev = dev/5\n amploff = off*.3\n ampl1 = np.exp(-(x-x0-amploff)**2/ampldev**2)\n ampl2 = np.exp(-(x-x0+amploff)**2/ampldev**2)\n ampl = ampl1+ampl2\n ampl = normalize(ampl, vmin=ampl_range[0], vmax=ampl_range[1])\n resar[ii] = ampl*np.exp(1j*phase)\n return resar, angles",
"def element_area_and_temperature(freq_hz):\n # Element noise data.\n noise_data = {\n 'freqs': [0.05e9, 0.07e9, 0.11e9, 0.17e9, 0.25e9, 0.35e9,\n 0.45e9, 0.55e9, 0.65e9],\n 'a_eff': [1.8791, 1.8791, 1.8694, 1.3193, 0.6080, 0.2956,\n 0.2046, 0.1384, 0.0792],\n 't_sys': [4.0409e3, 1.5029e3, 0.6676e3, 0.2936e3, 0.1402e3, 0.0873e3,\n 0.0689e3, 0.0607e3, 0.0613e3]\n }\n log_freq = numpy.log10(freq_hz)\n freqs = numpy.array(noise_data['freqs'])\n a_eff = numpy.array(noise_data['a_eff'])\n t_sys = numpy.array(noise_data['t_sys'])\n f_cut = 2\n\n # Interpolate to get effective area.\n if freq_hz <= freqs[f_cut]:\n f = scipy.interpolate.interp1d(numpy.log10(freqs[:f_cut+1]), \n numpy.log10(a_eff[:f_cut+1]), kind='slinear')\n a_eff = 10**f(log_freq)\n else:\n f = scipy.interpolate.interp1d(numpy.log10(freqs[f_cut:]), \n numpy.log10(a_eff[f_cut:]), kind='cubic')\n a_eff = 10**f(log_freq)\n\n # Interpolate to get system temperature.\n f = scipy.interpolate.interp1d(numpy.log10(freqs), \n numpy.log10(t_sys), kind='cubic')\n t_sys = 10**f(log_freq)\n return a_eff, t_sys",
"def remove_outliers(self, matrix):\n input = matrix[:, :-1]\n row_incides_to_delete = []\n for j, column in enumerate(input.transpose()):\n self.feature_means.append(np.mean(column))\n self.feature_stds.append(np.std(column))\n\n for i, row in enumerate(input):\n cell = input[i, j]\n if cell > self.feature_means[j] + 3 * self.feature_stds[j] or cell < self.feature_means[j] - 3 * \\\n self.feature_stds[j]:\n row_incides_to_delete.append(i)\n matrix = np.delete(matrix, row_incides_to_delete, 0)\n return matrix, len(list(set(row_incides_to_delete)))",
"def samples_keep(self,index):\n\n\t\tif isinstance(index, (int, long)): index = range(self.samples)[-index:]\n\n\t\tself.sampled_topics = np.take(self.sampled_topics,index,axis=0)\n\t\tself.tt = np.take(self.tt,index,axis=2)\n\t\tself.dt = np.take(self.dt,index,axis=2)\n\n\t\tself.samples = len(index)"
] | [
"0.5047761",
"0.49634433",
"0.4951183",
"0.49368143",
"0.48976904",
"0.48730645",
"0.48015064",
"0.47932002",
"0.47760272",
"0.47559175",
"0.47461358",
"0.4691884",
"0.46875986",
"0.46452206",
"0.46400303",
"0.46148774",
"0.45951375",
"0.45937127",
"0.45784828",
"0.45669794",
"0.45590064",
"0.4557172",
"0.45571664",
"0.4548794",
"0.45470458",
"0.4546863",
"0.45458135",
"0.45454448",
"0.45389923",
"0.4529729"
] | 0.6461404 | 0 |
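A small illustration of equalise_sample_numbers above, assuming numpy is available and the function is in scope; the sample values are made up. Sensor 1 supplies three samples and sensor 2 only two, so the most recently obtained sensor-1 row is dropped and both IDs end up with two samples each.

import numpy as np

np_samples = np.array([
    [1, 10, 11, 12, 0],   # [ID, X, Y, Z, Time]
    [2, 20, 21, 22, 1],
    [1, 13, 14, 15, 2],
    [2, 23, 24, 25, 3],
    [1, 16, 17, 18, 4],   # surplus sensor-1 sample, removed by the function
])

equalised = equalise_sample_numbers(np_samples, 2)
print(equalised.shape)    # expected: (4, 5)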
Reads the CSV file at the given path(string) and returns its rows as a list of lists. A usage sketch follows this record. | def read_csv(path):
csv_data =[]
    with open(path, 'r', newline='') as csv_file:
csv_read = csv.reader(csv_file, dialect='excel')
for row in csv_read:
csv_data.append(row)
    return csv_data
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:",
"def read_csv(path):\r\n output = []\r\n for row in csv.DictReader(open(path)):\r\n output.append(row)\r\n return output",
"def read_csv(path):\n output = []\n for row in csv.DictReader(open(path)):\n output.append(row)\n return output",
"def load_csv(csv_path):\n with open(csv_path, newline='') as csv_file:\n csv_reader = csv.reader(csv_file)\n csv_list = [row for row in csv_reader]\n return csv_list",
"def read_csv(path):\r\n data = []\r\n csv_file = open(path)\r\n for row in csv.DictReader(csv_file):\r\n data.append(row)\r\n csv_file.close() \r\n return data",
"def load_csv(file_path, access_mode = \"r\"):\n with open(file_path, access_mode) as f:\n return list(csv.reader(f))",
"def read_csv_file(path):\n\n\twith open(path, encoding=\"utf-8\", newline=\"\") as msgfile:\n\t\tdata = reader(msgfile)\n\t\tnewdata = [[val for val in row] for row in data]\n\n\treturn newdata",
"def read_csv_to_list(csv_path):\n\n with open(csv_path, newline=\"\") as f:\n reader = csv.reader(f)\n data = list(reader)\n\n return data",
"def parse_csv_file(file_path):\n\n complete_data_list = []\n\n try:\n import_file = open(file_path, \"rb\")\n\n except IOError:\n print 'An error occured trying to read the file.'\n\n else:\n reader_file = csv.DictReader(import_file)\n complete_data_list = get_file_data(reader_file)\n import_file.close()\n\n return complete_data_list",
"def load_csv(csv_path, params={}):\n csv_content = []\n with open(csv_path, encoding='utf-8') as fd:\n obj = csv.reader(fd, params)\n for line in obj:\n csv_content.append(line)\n\n return csv_content",
"def read_csv(file_path, delimiter=\",\", quotechar='\"'):\n # Opening file\n with open(file_path, newline='') as csvfile:\n # Will be used to store content\n lsts = []\n\n # Loading and reading csv\n csv_data = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar)\n\n # Adding data to container\n for row in csv_data:\n lsts.append(row)\n\n return lsts",
"def get_csv_in_path(self, path):\n files = os.listdir((path))\n return files",
"def read_csv(csv_file):\r\n with open(csv_file, \"r\") as files:\r\n data = csv.reader(files)\r\n return list(data)",
"def load_csv(csvpath):\n with open(csvpath, \"r\") as csvfile:\n data = []\n csvreader = csv.reader(csvfile, delimiter=\",\")\n\n # Skip the CSV Header\n next(csvreader)\n\n # Read the CSV data\n for row in csvreader:\n data.append(row)\n return data",
"def load_csv(path: Path) -> Any:\n with open(path, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n items = list(reader)\n return items",
"def read_csv(file_name):\n final_list = []\n reader = csv.reader(open(file_name, 'rb'), delimiter=',')\n for x in reader:\n final_list.append(x)\n return final_list",
"def read_csv_file(path):\n\tfrom csv import reader\n\n\tif PY2:\n\t\twith codecs.open(path, 'r', 'utf-8') as msgfile:\n\t\t\tdata = msgfile.read()\n\n\t\t\t# for japanese! #wtf\n\t\t\tdata = data.replace(chr(28), \"\").replace(chr(29), \"\")\n\t\t\tdata = reader([r.encode('utf-8') for r in data.splitlines()])\n\t\t\tnewdata = [[text_type(val, 'utf-8') for val in row] for row in data]\n\telse:\n\t\twith io.open(path, mode='r', encoding='utf-8', newline='') as msgfile:\n\t\t\tdata = reader(msgfile)\n\t\t\tnewdata = [[ val for val in row ] for row in data]\n\treturn newdata",
"def get_csv_data(file_path):\n reader = csv.DictReader(open(file_path), delimiter=\"\\t\")\n result = []\n for row in reader:\n result.append(row)\n\n return result",
"def read_csv(path, number_of_header_lines=0):\n # if not os.path.isfile(path):\n try:\n return genfromtxt(path, delimiter=', ', skip_header=number_of_header_lines)\n except:\n raise ValueError(\"File does not exist!\", path)",
"def readcsv(path, delimiter= ','):\n my_data = genfromtxt(path, delimiter= delimiter)\n return my_data",
"def csvread(file):\r\n thisfile = open(file)\r\n thisreader = csv.reader(thisfile)\r\n filelist = np.array(list(thisreader))\r\n return filelist",
"def read_csv(path: str) -> list[dict[str, str]]:\n with open(path, 'r') as f:\n return list(csv.DictReader(f))",
"def read_file_to_list(input_file):\n with open(input_file) as csvfile:\n csv_rows = csv.reader(csvfile)\n\n data = []\n for row in csv_rows:\n data.append(row)\n\n return data",
"def read_csv(path):\n return pd.read_csv(path)",
"def readCSV(self):\n\n content = []\n with open(self.filename) as file:\n sn = csv.Sniffer()\n sn.preferred = [self.delimiter]\n try:\n dialect = sn.sniff(file.read(1024))\n except csv.Error:\n if not file.endswith(\"csv\"):\n self.delimiter = \"\\t\"\n file.seek(0)\n reader = csv.reader(file, delimiter=self.delimiter)\n dialect = reader.dialect\n file.seek(0)\n reader = csv.reader(file, dialect)\n rownr = 0\n\n for row in reader:\n\n if rownr == 0:\n header = row\n else:\n # print(row)\n content.append(row)\n rownr += 1\n\n file.close()\n\n return content.copy()",
"def read_file(path: Union[str, Path], encoding: str = 'utf-8') -> Optional[List[str]]:\n if isinstance(path, str):\n path = Path(path).resolve()\n values: List[str] = []\n if path.suffix == '.csv':\n with path.open(encoding=encoding, newline='') as csvfile:\n values = [row for row in csv.reader(csvfile)]\n elif path.suffix == '.json':\n with path.open(encoding=encoding) as jsonfile:\n values = json.load(jsonfile)\n if not isinstance(values, list):\n _log.error(f\"Expected a list in JSON file {path.name},\"\n f\"got {type(values).__name__}\")\n return None\n else: # Parse as text, one value per line\n data = path.read_text(encoding=encoding)\n values = [line for line in data.split('\\n') if line != '']\n return values",
"def readCSV(self, csvFileName):\n\tdata = []\n\twith open(csvFileName) as csvFile:\n\t\treader = csv.reader(csvFile)\n\t\tfor row in reader:\n\t\t\tdata.append(row)\n\treturn data",
"def read_csv_rows(path: str) -> list[dict[str, str]]:\n file_handle = open(\"survey\", encoding=\"utf8\")\n csv_reader = DictReader(file_handle)\n rows: list[dict[str, str]] = []\n for row in csv_reader:\n rows.append(row)\n file_handle.close()\n return rows",
"def read_csv(filename):\n with open(filename) as csv:\n return [csv_line.strip().split(',') for csv_line in csv]",
"def read_file(path):\n with open(path) as _file:\n _list = _file.readlines()\n return _list"
] | [
"0.84735274",
"0.84323573",
"0.8389204",
"0.83887297",
"0.82988846",
"0.8259558",
"0.8183878",
"0.8143425",
"0.80833566",
"0.7938903",
"0.79075766",
"0.7878327",
"0.7857819",
"0.78288424",
"0.7688525",
"0.76732665",
"0.7578928",
"0.7515018",
"0.74723434",
"0.74717313",
"0.7451067",
"0.74224883",
"0.7359038",
"0.7355764",
"0.7336087",
"0.7323125",
"0.7296509",
"0.726197",
"0.7251898",
"0.72510093"
] | 0.8536996 | 0 |
Takes a numpy array of 3-axis accelerometer data of the form [X1,Y1,Z1,Time,X2,Y2,Z2,Time,...,XN,YN,ZN,Time], where the number of rows equals the number of samples per sensor and N is set by the NO_SENSORS (int) parameter. A numpy array of dimension [n][N*4] should therefore be provided as data (numpy array). The array is saved to a CSV file at the location given by the path (string) parameter and is written with a header as below. NO_SENSORS should not exceed 5, since header rows are only defined for up to five sensors. | def save_as_csv(path,data,NO_SENSORS):
HEADER1 = [ ['Sensor 1'],
['X','Y','Z','Time/ms'] ]
HEADER2 = [ ['Sensor 1',' ',' ',' ','Sensor 2'],
['X','Y','Z','Time/ms','X','Y','Z','Time/ms'] ]
HEADER3 = [ ['Sensor 1',' ',' ',' ','Sensor 2',' ',' ',' ','Sensor 3'],
['X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms'] ]
HEADER4 = [ ['Sensor 1',' ',' ',' ','Sensor 2',' ',' ',' ','Sensor 3',' ',' ',' ','Sensor 4'],
['X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms'] ]
HEADER5 = [ ['Sensor 1',' ',' ',' ','Sensor 2',' ',' ',' ','Sensor 3',' ',' ',' ','Sensor 4',' ',' ',' ','Sensor 5'],
['X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms'] ]
HEADERS = [HEADER1,HEADER2,HEADER3,HEADER4,HEADER5]
HEADER = HEADERS[NO_SENSORS - 1]
# The data is saved as a CSV file using the given path
    with open(path, 'w', newline='') as csv_file:  # newline='' prevents csv.writer from inserting blank rows on Windows
csv_write = csv.writer(csv_file, dialect='excel')
csv_write.writerows(HEADER)
csv_write.writerows(data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeDataCSV(data,outpath,outfile,out_head=None,message='data'):\n if (out_head is not None):\n #nhead = out_head.count(',') + 1\n nhead = len(out_head.split(',')) # Split header at every comma\n if (data.shape[1] != nhead):\n print('Warning: No. of fields does not match number of headings in', \n 'output file:',outfile+'.csv')\n print('No. fields =',data.shape[1],', No. headings =',nhead)\n filename = join(outpath, outfile + '.csv')\n print('Saving',message,'in file:',filename)\n np.savetxt(filename,data,delimiter=',',header=out_head) \n return None",
"def save_csv_notes(filename, data):\n assert data.shape[1] == 5\n np.savetxt(\n filename,\n data,\n fmt=\"%d\",\n delimiter=\",\",\n header=\"beat,position,pitch,duration,program\",\n comments=\"\",\n )",
"def write_csv_file(array, filename):\n\tnp.savetxt(filename, array, delimiter=\",\")",
"def save_csv_codes(filename, data):\n assert data.shape[1] == 6\n np.savetxt(\n filename,\n data,\n fmt=\"%d\",\n delimiter=\",\",\n header=\"type,beat,position,pitch,duration,instrument\",\n comments=\"\",\n )",
"def write_CSV_data(fname, names, npts, nvar, append, data):\n \n if append > 0:\n f = open(fname,'a')\n else:\n f = open(fname,'w')\n for nm in names:\n f.write(nm+',')\n f.write('\\n')\n for j in range(npts):\n for n in range(nvar):\n f.write('%10.4e, ' % data.value(j,n))\n f.write('\\n')\n f.close()",
"def example_data_file():\n\n header1 = \"#Sample Interval: 0.100000 (seconds)\"\n header2 = \"Timestamp,AccelX,AccelY,RateX,RateY\"\n header3 = \"dd-mmm-yyyy HH:MM:SS.FFF,mm/s2,mm/s2,rad/s,rad/s\"\n\n start_date = dt.datetime(2016, 3, 17, 1, 0, 0)\n\n # Add one tenth of a second\n time_delta = dt.timedelta(0, 0, 0, 100)\n\n # Sample frequency in Hz\n sample_freq = 10\n\n # 20 in event duration in seconds\n Ts = 60 * 20\n\n # Number of points\n N = Ts * sample_freq\n\n # Array of times\n time = [start_date + i * time_delta for i in range(N)]\n time_str = [t.strftime(\"%Y-%m-%d %H:%M:%S.%f\") for t in time]\n\n ax, ay, Rx, Ry = example_data(sample_freq, Ts)\n\n data = [\n \",\".join([time_str[i], str(ax[i]), str(ay[i]), str(Rx[i]), str(Ry[i])]) for i in range(N)\n ]\n\n data.insert(0, header3)\n data.insert(0, header2)\n data.insert(0, header1)\n\n return \"\\n\".join(data)",
"def save_as_csv(time_series, data, path_and_file_name):\n\n parent_name = \"test\"\n parent_uqid = uuid.uuid4()\n\n file_obj = open(path_and_file_name, 'w')\n file_obj.write('version,'+str(2)+'\\n')\n file_obj.write('numOfCH,'+str(1)+'\\n')\n file_obj.write('type, scan\\n')\n file_obj.write('ch_type,'+str(0)+'\\n')\n\n file_obj.write('carpet pos,'+str(0)+'\\n')\n file_obj.write('parent_name,'+str(parent_name)+'\\n')\n file_obj.write('parent_uqid,'+str(parent_uqid)+'\\n')\n file_obj.write('parent_filename,'+str(path_and_file_name)+'\\n')\n\n file_obj.write('pc, 0\\n')\n file_obj.write('Time (ns), CH0 Auto-Correlation\\n')\n for time_step in range(0, time_series.shape[0]):\n file_obj.write(str(float(time_series[time_step]))+','+str(data[time_step])+ '\\n')\n file_obj.write('end\\n')\n\n file_obj.close()",
"def save_csv(data, name):\n\n data_shp = np.asarray(np.shape(data))\n data_shp[0] = data_shp[0] + 1\n\n new_array = np.zeros(data_shp)\n new_array[1:, :] = data\n\n csv_array = new_array.astype(str)\n\n # Format the first row with legend\n leg = ['Energy (MeV)', 'Attentuation (cm^-1)']\n\n csv_array[0, :] = leg\n\n np.savetxt(os.path.join(directory, f'{name}.csv'), csv_array, delimiter=',', fmt='%s')",
"def make_csv(file_of_data):\n with open(file_of_data, 'w') as f:\n writer = csv.writer(f)\n header = (\"Counter\", \"Date/time\", \"Latitude\", \"Longitude\", \"Temperature\", \"Humidity\")\n writer.writerow(header)",
"def generate_numeric_csv(rows: int, columns: int, file_path: str):\n a: np.ndarray = np.random.random(rows * columns)\n np.random.random_sample(100)\n a.reshape(shape=(rows, columns))\n np.savetxt(file_path, a, delimiter=',')\n logging.info(\"Numeric CSV saved %s\", file_path)",
"def write_data(self, filename,\n columns=('Q', 'R', 'dR'),\n header=None):\n if header is None:\n header = \"# %s\\n\"%' '.join(columns)\n with open(filename, 'wb') as fid:\n fid.write(asbytes(header))\n data = np.vstack([getattr(self, c) for c in columns])\n np.savetxt(fid, data.T)",
"def save_csv_txt(filename, data, delimiter=',', newline='\\n', header=None):\n if header is None:\n header = delimiter.join(data.dtype.names)\n\n with open(filename, 'w') as f:\n f.write(header + newline)\n for i, row in enumerate(data):\n if i % 1000 == 0:\n log.debug(\"Writing {f}: line {l}\".format(f=filename, l=i))\n line = delimiter.join(\"-9999\" if (value == -9999.0 or value == -9999.9) else str(value) for value in row)\n f.write(line + newline)",
"def export_data(self):\r\n \r\n \r\n output_file = 'export.csv'\r\n data = self.get_raw_data()\r\n \r\n if data != []:\r\n print('Writing to file', output_file)\r\n with open(output_file, 'w',) as csvfile:\r\n fluorescence_levels = csv.writer(csvfile)\r\n fluorescence_levels.writerow(['sensor_1','Time'])\r\n for i in data:\r\n fluorescence_levels.writerow(i)\r\n print('done')\r\n \r\n else:\r\n print('no recorded data')",
"def writeData(fname,data):\n with open(fname,\"w\",newline=\"\") as fo:\n wr = csv.writer(fo)\n wr.writerow([\"x\"]+[\"Series {}\".format(i+1) for i in range(len(data))])\n # just in case things are of different lengths\n n = max([len(d) for d in data])\n for i in range(n):\n lst = [i]\n for d in data:\n try:\n val = d[i]\n except IndexError:\n val = 0\n lst.append(val)\n wr.writerow(lst)",
"def init_csv_file(self):\n folder = \"/home/pi/data/\" + datetime.now().strftime(\"%Y_%m_%d\") + \"/\"\n if not os.path.isdir(folder):\n # append 'a' to the folder name until we find a name that does not exist\n while os.path.exists(folder):\n folder = folder[:-1] + \"a\" + \"/\"\n os.mkdir(folder)\n filename = folder + 'particledata_' + datetime.now().strftime (\"%H-%M-%S\") \n while os.path.exists(filename):\n filename = filename + '_a'\n filename += '.csv'\n log.info('Writing data to: ' + filename)\n self.file = open(filename, \"w\")\n self.file.write('Unix Time;Human Readable Time;pm 2.5;pm 10;Has Fix;Longitude;Latitude;Altitude;GPS Unix Time\\n')\n self.file.flush()\n self.synced_time = False",
"def csv_writer(data, path, arr):\n with open(path, \"w\", newline='') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames = arr)\n for line in data:\n writer.writerow(line)",
"def organize_data(path_dir, accelerometer_file, accelerometer_data):\n\n accelerometer_df = pd.read_csv(os.path.join(path_dir, accelerometer_file), usecols=['UTC time', 'x', 'y', 'z'])\n\n x_list = accelerometer_df['x']\n y_list = accelerometer_df['y']\n z_list = accelerometer_df['z']\n UTC_times_list = accelerometer_df['UTC time']\n\n x_y_z_list_for_hour = [] # will contain 60*60 values, that every value is [x,y,z]\n\n curr_line_index = 0\n curr_date_time = get_date_time_from_UTC_time(UTC_times_list[curr_line_index])\n for i in range(60):\n for j in range(60):\n if (curr_date_time.minute != i or curr_date_time.second != j) or curr_line_index + 1 == len(UTC_times_list): # the curr time is more or little then the wanted time, or we finished all the lines in the file --> there is a need to fulfill the values with 0,0,0\n continue\n else:\n x_y_z_list_for_hour.append([x_list[curr_line_index], y_list[curr_line_index], z_list[curr_line_index]])\n while curr_date_time.minute == i and curr_date_time.second <= j and curr_line_index + 1 != len(UTC_times_list):\n curr_line_index += 1\n curr_date_time = get_date_time_from_UTC_time(UTC_times_list[curr_line_index])\n date = get_date_from_file_name(accelerometer_file)\n hour = curr_date_time.hour\n if date not in accelerometer_data.data_dic:\n accelerometer_data.data_dic[date] = {}\n accelerometer_data.data_dic[date][hour] = x_y_z_list_for_hour",
"def quick_save_array(data, file_name, delimiter=',', ):\n data.tofile(file_name, sep=delimiter)",
"def export_csv(self, path):\r\n\r\n with open(path, 'w') as f:\r\n f.write('# h,hr,m')\r\n\r\n if self.rho is not None:\r\n f.write(',rho')\r\n if self.temperature is not None:\r\n f.write(',temperature')\r\n\r\n f.write('\\n')\r\n for i in range(self.shape[0]):\r\n for j in range(self.shape[1]):\r\n f.write(f'{self.h[i, j]},{self.hr[i, j]},{self.m[i, j]}')\r\n if self.rho is not None:\r\n f.write(f',{self.rho[i, j]}')\r\n if self.temperature is not None:\r\n f.write(f',{self.temperature[i, j]}')\r\n f.write('\\n')\r\n return",
"def put_2Darray(file,array,header='',format='',append='no'):\n lista=[]\n for i in range(array.shape[1]):lista.append(array[:,i])\n lista=tuple(lista)\n put_data(file,lista,header,format,append)",
"def write_data_to_csv(cell_cent_top_lst, u_top_fe_conv_lst, disp_cent_PD_array_lst, u_disp_PD_array_lst, file_path, file_name):\n import csv\n def _write_(abs_file_path, arr):\n with open(abs_file_path, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=';')\n writer.writerows(arr)\n\n\n num_data = len(cell_cent_top_lst)\n for i in range(num_data):\n cel_cnt_file_name = path.join(file_path, file_name + str(i).zfill(2) + '_cel_cnt_top.csv')\n ufe_file_name = path.join(file_path, file_name + str(i).zfill(2) + '_ufe_top.csv')\n dsc_file_name = path.join(file_path, file_name + str(i).zfill(2) + '_dsp_cnt.csv')\n u_dsp_file_name = path.join(file_path, file_name + str(i).zfill(2) + '_u_dsp.csv')\n\n _write_(cel_cnt_file_name, cell_cent_top_lst[i])\n _write_(ufe_file_name, u_top_fe_conv_lst[i])\n _write_(dsc_file_name, disp_cent_PD_array_lst[i])\n _write_(u_dsp_file_name, u_disp_PD_array_lst[i])\n\n return",
"def data_callback(data_in):\n global data\n global cnt\n global outfile\n # print(data_in[0][0])\n data = np.roll(data, -1, axis=0)\n data[-1,:] = data_in\n outfile.write(\"{},{},{},{},{},{},{},{}\\n\".format(data[0][0], data[0][1], data[0][2], data[0][3], data[0][4], data[0][5], data[0][6], data[0][7]))\n cnt += 1",
"def _write_csv(self, file_name, metadata, dates, data, disclaimer,\n float_fmt):\n\n version = '# file_format: pysonde csv format version 1.0\\n'\n header = [version]\n #prepend parameter list and units with single #\n param_header = '# datetime, '\n unit_header = '# yyyy/mm/dd HH:MM:SS, '\n dtype_fmts = ['|S19']\n fmt = '%s, '\n for param in np.sort(data.keys()):\n param_header += param + ', '\n try:\n unit_header += data[param].dimensionality.keys()[0].symbol + \\\n ', '\n except:\n unit_header += 'nd, '\n fill_value = float(metadata['fill_value']) * data[param].units\n data[param][np.isnan(data[param])] = fill_value\n dtype_fmts.append('f8')\n fmt += float_fmt + ', '\n\n #prepend disclaimer and metadata with ##\n for line in disclaimer.splitlines():\n header.append('# disclaimer: ' + line + '\\n')\n\n #for key,val in metadata.items():\n # if not isinstance(val, np.ndarray):\n # header.append('# ' + str(key) + ': ' + str(val) + '\\n')\n # else:\n # param_header += key + ', '\n # unit_header += 'n/a, '\n # dtype_fmts.append(val.dtype)\n # fmt += '%s, '\n for key in np.sort(metadata.keys()):\n if not isinstance(metadata[key], np.ndarray):\n header.append('# %s: %s\\n' % (str(key), str(metadata[key])))\n\n else:\n param_header += key + ', '\n unit_header += 'n/a, '\n dtype_fmts.append(metadata[key].dtype)\n fmt += '%s, '\n\n #remove trailing commas\n param_header = param_header[:-2] + '\\n'\n unit_header = unit_header[:-2] + '\\n'\n fmt = fmt[:-2]\n\n header.append('# timezone: ' + str(self.default_tzinfo) + '\\n')\n header.append(param_header)\n header.append(unit_header)\n\n dtype = np.dtype({\n 'names': param_header.replace(' ', '').strip('#\\n').split(','),\n 'formats': dtype_fmts})\n\n write_data = np.zeros(dates.size, dtype=dtype)\n write_data['datetime'] = np.array(\n [datetime.datetime.strftime(dt, '%Y/%m/%d %H:%M:%S')\n for dt in dates])\n\n for key, val in metadata.items():\n if isinstance(val, np.ndarray):\n write_data[key] = val\n\n for param in data.keys():\n write_data[param] = data[param]\n\n #start writing file\n fid = open(file_name, 'w')\n fid.writelines(header)\n np.savetxt(fid, write_data, fmt=fmt)\n fid.close()",
"def save_array(self, name: str, array: np.ndarray):\r\n np.savetxt(self._path_for_csv(name), array, delimiter=\",\")",
"def write_torque_table(A, filename):\n f = open(filename, 'w')\n for row in range(np.size(A, axis=0)):\n A[row,:].tofile(f, sep=',')\n f.write('\\n')\n f.close()",
"def write_inputdata_file(filename = None, nerve_conditions = None, onsettime_samples = None):\n if not filename:\n print \"filename not valid\"\n return\n if not nerve_conditions:\n print \"no nerve_conditions given\"\n return\n if not onsettime_samples:\n print \"no onsettime_samples given\"\n return\n \n a_time_samples = np.array([onsettime_samples[nervcond] for nervcond in nerve_conditions]).T\n \n outfile = open(os.path.join('/extra/InVivoDog/Elazar/inputdata', filename),\n 'wt')\n \n csv_writer = csv.writer(outfile)\n \n csv_writer.writerow(['File #'] + nerve_conditions)\n \n for num, row in enumerate(a_time_samples):\n filenum = num + 1\n \n csv_writer.writerow([filenum] + list(row))\n \n outfile.close()",
"def writecsvfile(filename, columnnames, data):\n with open(filename, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n writer.writerow(columnnames) # header row\n for row in data:\n writer.writerow(row[:])",
"def load_rawdata_phyphox(datafile):\n with zipfile.ZipFile(datafile) as datazip:\n with datazip.open('Accelerometer.csv', \"r\") as infile:\n rawdata = infile.readlines()\n data = {}\n data[\"time\"] = np.genfromtxt(rawdata, usecols=0, skip_header=1)\n N_acc = len(data[\"time\"])\n data[\"ax\"] = np.genfromtxt(rawdata, usecols=1, skip_header=1)\n data[\"ay\"] = np.genfromtxt(rawdata, usecols=2, skip_header=1)\n data[\"az\"] = np.genfromtxt(rawdata, usecols=3, skip_header=1)\n data[\"counter\"] = np.arange(N_acc)\n data[\"dt\"] = np.ones(N_acc) * (data[\"time\"][1] - data[\"time\"][0])\n with datazip.open('Gyroscope.csv', \"r\") as infile:\n rawdata = infile.readlines()\n data[\"rx\"] = np.genfromtxt(rawdata, usecols=1, skip_header=1)\n data[\"ry\"] = np.genfromtxt(rawdata, usecols=2, skip_header=1)\n data[\"rz\"] = np.genfromtxt(rawdata, usecols=3, skip_header=1)\n N_gyro = len(data[\"rx\"])\n N = min(N_acc, N_gyro)\n for key in data:\n data[key] = data[key][:N]\n return data",
"def write_data_csv(file,data,im_id,lock,num_validators=1):\n lock.acquire()\n with open(file, mode = 'a') as f:\n for row in data:\n f.write((str(im_id) + \",\" + str(num_validators)))\n for val in row:\n f.write(\",\")\n f.write(str(val))\n f.write(\"\\n\") \n lock.release()",
"def writeGP(loc, fname, data, header, ncol=6):\n size = len(data)\n nrow = int(size / ncol)\n size_last_row = size % ncol\n\n lines = \"\"\n for line in np.reshape(range(nrow * ncol), (nrow, ncol)):\n for val in line:\n lines += \"{:^20.6e}\".format(data[val]) + 3 * \" \"\n lines = lines.rstrip(3 * \" \") + \"\\n\"\n\n if size_last_row:\n for i in range(1, size_last_row + 1):\n lines += \"{:^20.6e}\".format(data[-i]) + 3 * \" \"\n lines = lines.rstrip(3 * \" \")\n\n with open(\"/\".join([loc, fname]), \"w\") as f:\n f.writelines(header)\n f.writelines(lines)\n return"
] | [
"0.62333757",
"0.60136515",
"0.59167486",
"0.58182275",
"0.5800514",
"0.57883203",
"0.5723681",
"0.5680198",
"0.55766135",
"0.5498357",
"0.54279953",
"0.5375188",
"0.53628296",
"0.5357926",
"0.53508735",
"0.52884793",
"0.5267095",
"0.5256821",
"0.52335376",
"0.5195675",
"0.51900804",
"0.5170502",
"0.5165822",
"0.5164019",
"0.5144597",
"0.51416564",
"0.5118418",
"0.5118218",
"0.5083838",
"0.5073986"
] | 0.763759 | 0 |
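A minimal usage sketch for the save_as_csv record above; the same [n][N*4] column layout is what the plot_multifig record below consumes. This is an illustration, not part of the dataset row: it assumes save_as_csv is defined as shown in the record, that numpy is available, and the file name accel_log.csv is purely hypothetical.

import numpy as np

# Build a synthetic [n][N*4] array for two sensors; each sensor block is X, Y, Z, Time.
NO_SENSORS = 2
n_samples = 100
t = np.linspace(0.0, 1.0, n_samples)              # shared time base for all sensors
blocks = []
for s in range(NO_SENSORS):
    x = np.sin(2 * np.pi * (s + 1) * t)           # placeholder X-axis signal
    y = np.cos(2 * np.pi * (s + 1) * t)           # placeholder Y-axis signal
    z = np.zeros_like(t)                          # placeholder Z-axis signal
    blocks.append(np.column_stack((x, y, z, t)))  # 4 columns per sensor
data = np.hstack(blocks)                          # shape: (n_samples, NO_SENSORS * 4)

save_as_csv('accel_log.csv', data, NO_SENSORS)    # writes the header rows, then the samples

The same data array could then be passed to the plot_multifig record that follows (with dataSelection=1 to use the -3 to 3 g axis limits).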
Plots 3-axis accelerometer data on separate graphs per sensor, each in a separate figure. The next figure appears once the previous figure is closed. Takes a numpy array of 3-axis accelerometer data of the form [X1,Y1,Z1,Time,X2,Y2,Z2,Time,...,XN,YN,ZN,Time], where the number of rows equals the number of samples per sensor and N is set by the NO_SENSORS (int) parameter. A numpy array of dimension [n][N*4] should therefore be provided as data (numpy array). The dataSelection parameter should be 0 or 1 and sets the Y axis to either 0-1024 ADC counts or -3 to 3 g respectively. | def plot_multifig(data,NO_SENSORS,dataSelection):
# Axis options
yAxisLimits = [[0,1024],[-3,3]]
    # Plot a separate graph for each sensor; each sensor occupies 4 columns (X, Y, Z, Time)
for i in range(0,NO_SENSORS):
plt.figure(i + 1)
plt.title('Sensor ' + str(i + 1))
plt.plot(data[:,(3 + (4 * i))],data[:,(0 + (4 * i))],label='X Axis')
plt.plot(data[:,(3 + (4 * i))],data[:,(1 + (4 * i))],label='Y Axis')
plt.plot(data[:,(3 + (4 * i))],data[:,(2 + (4 * i))],label='Z Axis')
plt.ylim(yAxisLimits[dataSelection][0],yAxisLimits[dataSelection][1])
plt.xlabel('Time/s')
plt.ylabel('Acceleration/g')
plt.legend()
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_singlefig(data,NO_SENSORS,dataSelection):\n \n # Axis options\n yAxisLimits = [[0,1024],[-3,3]]\n \n # Plots graphs for each sensor on 1 figure\n plt.figure(1)\n for i in range(0,NO_SENSORS):\n # The figure is seperated into subplots using the parameter. 231 means 2 rows, 3 columns, subplot 1\n plt.subplot(231 + i)\n plt.title('Sensor ' + str(i + 1))\n plt.plot(data[:,(3 + (4 * i))],data[:,(0 + (4 * i))],label='X Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(1 + (4 * i))],label='Y Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(2 + (4 * i))],label='Z Axis')\n plt.ylim(yAxisLimits[dataSelection][0],yAxisLimits[dataSelection][1])\n plt.xlabel('Time/s')\n plt.ylabel('Acceleration/g')\n plt.legend()\n plt.show()",
"def plot_channels(self, data_array):\n\n plt.figure()\n for p in range(1, 7):\n plt.subplot(6, 1, p)\n plt.plot(data_array[p-1, :])\n\n plt.draw()\n plt.show()\n return",
"def initialiseData(self):\n self.currentPosition = 0\n self.xs = scipy.linspace(0.0, self.numberOfPoints*self.resolution, self.numberOfPoints)\n self.cursorXS = self.getCurrentPositionArray()\n self.cursorVertical = scipy.array([self.verticalLimit,0.0])\n self.array0 = scipy.zeros(self.numberOfPoints)\n self.array1 = scipy.zeros(self.numberOfPoints)\n self.array2 = scipy.zeros(self.numberOfPoints)\n self.array3 = scipy.zeros(self.numberOfPoints)\n self.array4 = scipy.zeros(self.numberOfPoints)\n self.array5 = scipy.zeros(self.numberOfPoints)\n self.array6 = scipy.zeros(self.numberOfPoints)\n self.array7 = scipy.zeros(self.numberOfPoints)\n self.channels = [self.array0,self.array1,self.array2,self.array3,\n self.array4,self.array5,self.array6,self.array7]\n self.arrayPlotData = chaco.ArrayPlotData(xs=self.xs,channel0=self.array0,channel1=self.array1,\n channel2=self.array2,channel3=self.array3,\n channel4=self.array4,channel5=self.array5,\n channel6=self.array6,channel7=self.array7,\n cursorXS = self.cursorXS, cursorVertical=self.cursorVertical)#will be the ArrayPlotData We need",
"def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()",
"def plot_data(self, show=False):\n\n fig, ax = plt.subplots(2, 1, figsize = (8, 6))\n plt.subplots_adjust(hspace=0.5)\n\n nrows = 10\n\n for _ in range(nrows):\n\n # plot nrows random examples from the simulated train data \n if self.flatten:\n print ('Plotting data... reshaping the flattened data to %s'%str(input_shape))\n temp = self.data['data'][np.random.randint(self.n_train * self.n_s)].reshape(input_shape)\n x, y = temp.T[:,0]\n else:\n print ('Plotting data...')\n temp = self.data['data'][np.random.randint(self.n_train * self.n_s)].reshape(ncombinations,len(ells))\n Cl = temp[0] # plot the (0,0) autocorrelation bin\n\n if self.rescaled:\n ax[0].plot(ells, Cl)\n else:\n ax[0].loglog(ells, ells*(ells+1)*Cl)\n ax[0].set_title(f'{nrows} examples from training data, Cl (0,0)')\n ax[0].set_xlabel(r'$\\ell$')\n ax[0].set_xscale('log')\n if self.rescaled:\n ax[0].set_ylabel(r'$C_\\ell$')\n else:\n ax[0].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n \n\n # plot nrows random examples from the simulated test data \n if self.flatten:\n temp = self.data['validation_data'][np.random.randint(self.n_s)].reshape(input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['validation_data'][np.random.randint(self.n_train * self.n_s)].reshape(ncombinations,len(ells))\n Cl = temp[0] # plot the (0,0) autocorrelation bin\n\n if self.rescaled:\n ax[1].plot(ells, Cl)\n else:\n ax[1].loglog(ells, ells*(ells+1)*Cl)\n ax[1].set_title(f'{nrows} examples from test data, Cl (0,0)')\n ax[1].set_xlabel(r'$\\ell$')\n ax[1].set_xscale('log')\n if self.rescaled:\n ax[1].set_ylabel(r'$C_\\ell$')\n else:\n ax[1].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n # plt.legend()\n\n plt.savefig(f'{self.figuredir}data_visualization_{self.modelversion}.png')\n if show: plt.show()\n plt.close()",
"def updateArrayPlotData(self):\n self.arrayPlotData.set_data(\"channel0\",self.array0)\n self.arrayPlotData.set_data(\"channel1\",self.array1)\n self.arrayPlotData.set_data(\"channel2\",self.array2)\n self.arrayPlotData.set_data(\"channel3\",self.array3)\n self.arrayPlotData.set_data(\"channel4\",self.array4)\n self.arrayPlotData.set_data(\"channel5\",self.array5)\n self.arrayPlotData.set_data(\"channel6\",self.array6)\n self.arrayPlotData.set_data(\"channel7\",self.array7)\n self.arrayPlotData.set_data(\"cursorXS\",self.cursorXS)\n #self.arrayPlotData.set_data(\"cursorVertical\",self.cursorVertical)",
"def plot_data(data, fig):\n\n if data.shape[1] > 3:\n print(\"Warning: data dimension is larger than 3, dim is %s\" % (data.shape[1]))\n\n ax = fig.add_subplot(111, projection='3d')\n # ax.scatter(data[:, 0], data[:, 1], data[:, 2], marker='.', s=0.5)\n return ax",
"def __init__(self, fpga, plots, chann=6069, freqs=[0, 67.5], bw=67.5):\n\n self.fpga = fpga\n self.fpga.write_int('cnt_rst',0) #just in case\n self.plots = plots\n self.nplots = len(self.plots)\n self.chann = chann\n self.freq = freqs\n self.bw = bw\n self.fft_freq = np.linspace(0, bw, 2**13,endpoint=False)\n self.plot_map = {1:'11', 2:'12', 3:'22', 4:'22', 5:'23',\n 6:'23', 7: '33', 8:'33', 9:'33'}\n self.fig = plt.figure()\n self.axes = []\n self.data = [] \n \n #generate a dict for the specification of each plot\n #the info is encoded in [title, y_label, x_label,(y_init, y_end), (x_init, x_end), [brams], data_type]\n self.plot_info = {'spect0':['Spectrum ZDOK0', '[dB]', '[MHz]',\n (30, 180), (self.freq), ['1_A2'], '>8192Q'],\n 'spect1':['Spectrum ZDOK1', '[dB]', '[MHz]',\n (30, 180), (self.freq), ['1_B2'], '>8192Q'],\n 're_full':['Real correlation', '', '[MHz]',\n (30,180), (self.freq), ['AB_re'], '>8192q'],\n 'im_full':['Imag correlation', '', '[MHz]',\n (30,180), (self.freq), ['AB_im'], '>8192q'],\n 'phase':['Relative Phase', ('['+u'\\xb0'+']'), '[MHz]',\n (-180,180), (self.freq), ['AB_im', 'AB_re'], '>8192q'],\n 'chann_pow':['Relative Power at'+str(self.fft_freq[self.chann]),\n '[dB]','[MHz]',(-180,180), (0, 8191),\n ['PowA', 'PowB'], '>8192Q'],\n 'chann_phase':['Relative phase at'+str(self.fft_freq[self.chann]),\n ('['+u'\\xb0'+']'), '[MHz]',(-180,180), (0,8191),\n ['phase'], '>16384q']}\n\n\tself.fpga.write_int('mux_sel',0)\n\tself.fpga.write_int('n_points', 16384)\n\tself.fpga.write_int('reading_data',1)\n\tself.fpga.write_int('reading_data',0)\n\tself.create_plots()\n\tanim = animation.FuncAnimation(self.fig, self.animate, blit=True)\n\tplt.show()",
"def plot(self):\n\n # initialize outside the loop to avoid memory leak\n\n plot_a = None\n\n # initial plotting scales\n vmin = 0\n vmax = 0\n pmin = 0\n pmax = 0\n\n sr = self.dio.get_properties(self.channel)['samples_per_second']\n\n if self.control.verbose:\n print 'sample rate: ', sr\n\n # initial time info\n display_lag = 60\n b = self.dio.get_bounds(self.channel)\n\n if self.control.verbose:\n print 'data bounds: ', b\n\n if self.control.start:\n dtst0 = dateutil.parser.parse(self.control.start)\n st0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n st0 = int(st0 * sr)\n else:\n st0 = int(b[0])\n\n if self.control.end:\n dtst0 = dateutil.parser.parse(self.control.end)\n et0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n et0 = int(et0 * sr)\n else:\n et0 = int(b[1])\n\n if self.control.verbose:\n\n print 'start sample st0: ', st0\n print 'end sample et0: ', et0\n\n blocks = self.control.bins * self.control.frames\n\n samples_per_stripe = self.control.num_fft * \\\n self.control.integration * self.control.decimation\n total_samples = blocks * samples_per_stripe\n\n if total_samples > (et0 - st0):\n print 'Insufficient samples for %d samples per stripe and %d blocks between %ld and %ld' % (samples_per_stripe, blocks, st0, et0)\n return\n\n stripe_stride = (et0 - st0) / blocks\n\n bin_stride = stripe_stride / self.control.bins\n\n start_sample = st0\n\n print 'first ', start_sample\n\n # get metadata\n # this could be done better to ensure we catch frequency or sample rate\n # changes\n mdt = self.dio.read_metadata(st0, et0, self.channel)\n try:\n md = mdt[mdt.keys()[0]]\n cfreq = md['center_frequencies'].ravel()[self.sub_channel]\n except (IndexError, KeyError):\n cfreq = 0.0\n\n if self.control.verbose:\n print 'processing info : ', self.control.frames, self.control.bins, samples_per_stripe, bin_stride\n\n for p in numpy.arange(self.control.frames):\n sti_psd_data = numpy.zeros(\n [self.control.num_fft, self.control.bins], numpy.float)\n sti_times = numpy.zeros([self.control.bins], numpy.complex128)\n\n for b in numpy.arange(self.control.bins):\n\n if self.control.verbose:\n print 'read vector :', self.channel, start_sample, samples_per_stripe\n\n d_vec = self.dio.read_vector(\n start_sample, samples_per_stripe, self.channel)\n data = d_vec[:, self.sub_channel]\n\n if self.control.decimation > 1:\n data = scipy.signal.decimate(data, self.control.decimation)\n sample_freq = sr / self.control.decimation\n else:\n sample_freq = sr\n\n if self.control.mean:\n detrend_fn = matplotlib.mlab.detrend_mean\n else:\n detrend_fn = matplotlib.mlab.detrend_none\n\n try:\n psd_data, freq_axis = matplotlib.mlab.psd(\n data, NFFT=self.control.num_fft, Fs=float(sample_freq), detrend=detrend_fn, scale_by_freq=False)\n except:\n traceback.print_exc(file=sys.stdout)\n\n sti_psd_data[:, b] = numpy.real(\n 10.0 * numpy.log10(numpy.abs(psd_data) + 1E-12))\n\n sti_times[b] = start_sample / sr\n\n start_sample += stripe_stride\n\n # Now Plot the Data\n ax = self.subplots[p]\n\n # determine image x-y extent\n extent = (\n 0,\n self.control.bins,\n numpy.min(freq_axis) / 1e3,\n numpy.max(freq_axis) / 1e3,\n )\n\n # determine image color extent in log scale units\n Pss = sti_psd_data\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n if self.control.zaxis:\n vmin = int(string.split(self.control.zaxis, ':')[0])\n vmax = 
int(string.split(self.control.zaxis, ':')[1])\n else:\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n im = ax.imshow(sti_psd_data, cmap='jet', origin='lower', extent=extent,\n interpolation='nearest', vmin=vmin, vmax=vmax, aspect='auto')\n\n ax.set_ylabel('f (kHz)', fontsize=8)\n\n # plot dates\n\n tick_spacing = numpy.arange(\n self.control.bins / 8, self.control.bins, self.control.bins / 8)\n ax.set_xticks(tick_spacing)\n tick_labels = []\n\n for s in tick_spacing:\n tick_time = sti_times[s]\n\n if tick_time == 0:\n tick_string = ''\n else:\n gm_tick_time = time.gmtime(numpy.real(tick_time))\n tick_string = '%02d:%02d:%02d' % (\n gm_tick_time[3], gm_tick_time[4], gm_tick_time[5])\n tick_labels.append(tick_string)\n\n ax.set_xticklabels(tick_labels)\n\n # set the font sizes\n tl = ax.get_xticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n tl = ax.get_yticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n print 'last ', start_sample\n\n # create a time stamp\n start_time = st0 / sr\n srt_time = time.gmtime(start_time)\n sub_second = int(round((start_time - int(start_time)) * 100))\n\n timestamp = \"%d-%02d-%02d %02d:%02d:%02d.%02d UT\" % (srt_time[0], srt_time[\n 1], srt_time[2], srt_time[3], srt_time[4], srt_time[5], sub_second)\n\n self.f.suptitle('%s %s %4.2f MHz (%s)' % (\n self.control.title, timestamp, cfreq / 1E6, self.control.path), fontsize=10)\n\n # ax.legend(fontsize=8)\n ax.set_xlabel('time (UTC)', fontsize=8)\n\n # fixup ticks\n\n tl = ax.get_xticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n tl = ax.get_yticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n\n self.gridspec.update()\n\n self.f.tight_layout()\n\n self.f.subplots_adjust(top=0.95, right=0.88)\n cax = self.f.add_axes([0.9, 0.12, 0.02, 0.80])\n self.f.colorbar(im, cax=cax)\n if self.control.outname:\n fname, ext = os.path.splitext(self.control.outname)\n if ext == '':\n ext = '.png'\n print \"Save plot as {}\".format(fname+ext)\n matplotlib.pyplot.savefig(fname+ext)\n if self.control.appear or not self.control.outname:\n print \"Show plot\"\n matplotlib.pyplot.show()",
"def make_wave_plots(Nsteps=NSTEPS, plotAmp=0):\n\n if plotAmp != 2:\n ax, fig = setup_plot()\n\n for k in range(Nsteps):\n if plotAmp == 2: # separate plot for each step\n ax, fig = setup_plot()\n\n for i in range(len(q_vec)):\n dataLen = len(t_vec)\n startIdx = k*dataLen/Nsteps\n endIdx = (k+1)*dataLen/Nsteps\n xVals = np.ones(dataLen)*q_vec[i]\n yVals = t_vec\n zVals = h_vec[i]\n zorder_waves = get_z_order_waves(q_vec, i)\n\n if plotAmp==0: # plot real parts in increments\n ax.plot3D(xVals[startIdx:endIdx], yVals[startIdx:endIdx], \\\n zs=np.real(zVals[startIdx:endIdx]), color=color_wave_list[i], \\\n lw=line_width, zorder=zorder_waves)\n elif plotAmp==1: # plot fixed real part, but amplitude in increments\n if k == 0:\n ax.plot3D(xVals, yVals, zs=np.real(zVals), color=color_wave_list[i], \\\n lw=line_width, zorder=zorder_waves)\n ax.plot3D(xVals[startIdx:endIdx], yVals[startIdx:endIdx], \\\n zs=np.abs(zVals[startIdx:endIdx]), color=color_amp, \\\n lw=line_width, zorder=zorder_waves)\n elif plotAmp==2: # plot amplitude, roll along t with height, plot empirical time nodes\n ax.plot3D(xVals, yVals, zs=np.abs(zVals), color=color_amp, lw=line_width) # plot amp\n tmp_dataLen = 1000\n tmp_x = q_vec[i]\n tmp_y = t_vec[startIdx]\n tmp_z = np.abs(h_vec[i][startIdx])\n tmp_xVals = np.ones(tmp_dataLen)*tmp_x\n tmp_yVals = np.ones(tmp_dataLen)*tmp_y\n tmp_zVals = np.linspace(0, tmp_z, tmp_dataLen)\n ax.plot3D(tmp_xVals, tmp_yVals, zs=tmp_zVals, color=color_ht, lw=line_width) # roll along t with height\n if tmp_y < 75: # plot the marker for the roll\n ax.scatter(tmp_x, tmp_y, tmp_z, c=color_marker, marker='s', s=marker_size_square)\n\n # plot empirical nodes after rolling past them\n for emp_time in empirical_node_times:\n if tmp_y >= emp_time: # If we rolled past this emp node, plot it always\n emp_idx = np.argmin(np.abs(t_vec - emp_time))\n this_x = q_vec[i]\n this_y = t_vec[emp_idx]\n this_z = np.abs(h_vec[i][emp_idx])\n # plot marker for empirical nodes\n ax.scatter(this_x, this_y, this_z, c=color_marker_nd, \\\n marker='o', s=marker_size)\n\n ax.set_xlim(0, 10)\n ax.set_ylim(min(t_vec), max(t_vec))\n ax.set_zlim(-ZLIM, ZLIM)\n ax.set_xlabel('$q$', fontsize=label_fontsize)\n ax.set_ylabel('$t$ $(M)$', fontsize=label_fontsize)\n\n\n # Make sure to save such that files end up in the same alphabetical order as you\n # want in the video. Here we just add a fake prefix.\n if plotAmp==0:\n save_plot('%s/aa_wave_%.4d.png'%(plotdir, k))\n elif plotAmp==1:\n save_plot('%s/ab_waveamp_%.4d.png'%(plotdir, k))\n elif plotAmp==2:\n ax.set_zlim(0, ZLIM)\n save_plot('%s/ba_amp_%.4d.png'%(plotdir, k))\n P.close()\n\n P.close()",
"def main():\n data = load_data()\n # BNO055 absolute orientation sensor\n bno_time = data[2].index / 1e6\n bno_accel_axes = [data[2][bno_str] / 9.8 for bno_str in BNO_ACCEL]\n plot_multi_axis(bno_time, bno_accel_axes,\n labels=['BNO055 Acceleration',\n 'Time (s)', 'Acceleration (G)',\n [*BNO_ACCEL, 'magnitude']],\n fname='bno_accel.html')\n bno_gyro_axes = [data[2][bno_str] for bno_str in BNO_GYRO]\n plot_multi_axis(bno_time, bno_gyro_axes,\n labels=['BNO055 Roll Rate',\n 'Time (s)', 'Roll Rate (deg/s)',\n [*BNO_GYRO, 'magnitude']],\n fname='bno_gyro.html')\n # MMA65XX high-range accelerometer\n mma_time = data[1].index / 1e6\n mma_axes = [data[1][mma_str] / 9.8 for mma_str in MMA]\n plot_multi_axis(mma_time, mma_axes,\n labels=['MMA65XX High-Range Acceleration',\n 'Time (s)', 'Acceleration (G)',\n [*MMA, 'magnitude']],\n fname='mma.html')\n # skybass_sampling_rates(data)",
"def plot_3d(x_data, y_data, Z, df, xlabel, ylabel, xrange=None,\n yrange=None, figsize=(12, 12)):\n fig = pyplot.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d')\n nsamp, nsen = Z.shape\n\n sen_index = df.columns.names.index('sensor')\n senlist = df.columns.levels[sen_index]\n pyplot.yticks(y_data, senlist)\n ax.plot_surface(\n np.repeat(x_data,\n nsen, axis=1),\n np.repeat(np.matrix(y_data), nsamp, axis=0),\n df.values,\n cmap=cm.coolwarm)\n pyplot.xlabel(xlabel)\n pyplot.ylabel('Sensor name')\n ax.set_zlabel(ylabel)\n ax.view_init(elev=45., azim=-130)\n ax.tick_params(axis='y', which='major', labelsize=4)\n pyplot.show()",
"def plot_examples(cms):\r\n data = amp_axis\r\n\r\n fig, axs = plt.subplots(1, 2, figsize=(30, 8)) #create two plots\r\n for [ax, cmap] in zip(axs, cms):\r\n psm = ax.pcolormesh(time_axis, tof_axis, data, cmap=cmap, rasterized=True, vmin = 250) #specify axis and minimum amplitude value to show on the graph\r\n fig.colorbar(psm, ax=ax, label = 'Amplitude') #define the legend of the amplitude data\r\n \r\n ax.set_ylabel('Time of Flight [\\u03bcs]') #set label for y axis\r\n ax.set_xlabel('Time [min]') #set label for x axis\r\n \r\n ax.hlines(8.744, 0, stop_time, colors = 'white') #create two white lines for the safe operating range for ToF\r\n ax.hlines(9.555, 0, stop_time, colors = 'white') \r\n \r\n plt.show()",
"def visualize_signal(self):\n plt.figure()\n plt.title('Accelerometer Signal')\n plt.plot(range(len(self.data)), self.data[1])",
"def plotData(BX,BY,xi,yi,expArr,t,savepath_dir):\r\n \r\n #Find the current channel data\r\n Jz=newCurrent(BX,BY,xi,yi,expArr,t)\r\n\r\n #Find the dipole vector components\r\n BxTime=np.real(BX*expArr[t])\r\n ByTime=np.real(BY*expArr[t])\r\n\r\n #Plot the current density contour and dipole vector grid\r\n #Create the figure\r\n p1=plt.figure(figsize=(9,8))\r\n \r\n #Plot the data\r\n p1=plt.contourf(xi,yi,Jz,levels=100,vmin=-0.1,vmax=0.1)\r\n qv1=plt.quiver(xi,yi,BxTime,ByTime,width=0.004,scale=3)\r\n \r\n #Add axes labels and title\r\n p1=plt.xlabel('X [cm]',fontsize=20)\r\n p1=plt.ylabel('Y [cm]',fontsize=20)\r\n # p1=plt.title('Alfven Wave Dipole; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n p1=plt.title('E Field; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n \r\n #Set axes parameters\r\n p1=plt.xticks(np.arange(-50,51,5))\r\n p1=plt.yticks(np.arange(-50,51,5))\r\n p1=plt.xlim(-xAxisLim,xAxisLim)\r\n p1=plt.ylim(-yAxisLim,yAxisLim)\r\n \r\n #Add colorbar\r\n cbar=plt.colorbar()\r\n cbar.set_label('Normalized Current Density',rotation=270,labelpad=15)\r\n cbar=plt.clim(-1,1)\r\n \r\n #Add vector label\r\n plt.quiverkey(qv1,-0.1,-0.1,0.2,label=r'$(B_x,B_y)$')\r\n \r\n #Miscellaneous\r\n p1=plt.tick_params(axis='both', which='major', labelsize=18)\r\n p1=plt.grid(True)\r\n p1=plt.gcf().subplots_adjust(left=0.15)\r\n\r\n #Save the plot\r\n savepath_frame=savepath_dir+'frame'+str(t+1)+'.png'\r\n p1=plt.savefig(savepath_frame,dpi=100,bbox_to_anchor='tight')\r\n p1=plt.close()\r\n\r\n #Let me know which frame we just saved\r\n print('Saved frame '+str(t+1)+' of '+str(len(expArr)))\r\n \r\n return",
"def plot_sensors_3d_intra(ax: str, epo1: mne.Epochs, epo2: mne.Epochs, lab: bool = False):\n\n # extract sensor infos and transform loc to fit with headmodel \n loc1 = copy(np.array([ch['loc'][:3] for ch in epo1.info['chs']]))\n loc1 = transform(loc1, traX=0, traY=0, traZ=0.04, rotY=0, rotZ=(-np.pi/2))\n lab1 = [ch for ch in epo1.ch_names]\n\n loc2 = copy(np.array([ch['loc'][:3] for ch in epo2.info['chs']]))\n loc2 = transform(loc2, traX=0, traY=0.5, traZ=0.04, rotY=0, rotZ=(-np.pi/2))\n lab2 = [ch for ch in epo2.ch_names]\n\n bads_epo1 =[]\n bads_epo1 = epo1.info['bads']\n bads_epo2 =[]\n bads_epo2 = epo2.info['bads']\n\n # plot sensors\n for ch in epo1.ch_names:\n if ch in bads_epo1:\n index_ch = epo1.ch_names.index(ch)\n x1, y1, z1 = loc1[index_ch, :]\n ax.scatter(x1, y1, z1, marker='x', color='dimgrey')\n if lab:\n ax.text(x1+0.012, y1+0.012 ,z1, lab1[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n else:\n index_ch = epo1.ch_names.index(ch)\n x1, y1, z1 = loc1[index_ch, :]\n ax.scatter(x1, y1, z1, marker='o', color='dimgrey')\n if lab:\n ax.text(x1+0.012, y1+0.012 ,z1, lab1[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n\n for ch in epo2.ch_names:\n if ch in bads_epo2:\n index_ch = epo2.ch_names.index(ch)\n x2, y2, z2 = loc2[index_ch, :]\n ax.scatter(x2, y2, z2, marker='x', color='dimgrey')\n if lab:\n ax.text(x2+0.012, y2+0.012 ,z2, lab2[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n else:\n index_ch = epo2.ch_names.index(ch)\n x2, y2, z2 = loc2[index_ch, :]\n ax.scatter(x2, y2, z2, marker='o', color='dimgrey')\n if lab:\n ax.text(x2+0.012, y2+0.012 ,z2, lab2[index_ch],\n horizontalalignment='center',\n verticalalignment='center')",
"def plot_sensors_3d_inter(ax: str, epo1: mne.Epochs, epo2: mne.Epochs, lab: bool = False):\n\n # extract sensor infos and transform loc to fit with headmodel \n loc1 = copy(np.array([ch['loc'][:3] for ch in epo1.info['chs']]))\n loc1 = transform(loc1, traX=-0.17, traY=0, traZ=0.08, rotY=(-np.pi/12), rotZ=(-np.pi/2))\n lab1 = [ch for ch in epo1.ch_names]\n\n loc2 = copy(np.array([ch['loc'][:3] for ch in epo2.info['chs']]))\n loc2 = transform(loc2, traX=0.17, traY=0, traZ=0.08, rotY=(np.pi/12), rotZ=np.pi/2)\n lab2 = [ch for ch in epo2.ch_names]\n\n bads_epo1 =[]\n bads_epo1 = epo1.info['bads']\n bads_epo2 =[]\n bads_epo2 = epo2.info['bads']\n\n # plot sensors ('x' for bads)\n for ch in epo1.ch_names:\n if ch in bads_epo1:\n index_ch = epo1.ch_names.index(ch)\n x1, y1, z1 = loc1[index_ch, :]\n ax.scatter(x1, y1, z1, marker='x', color='dimgrey')\n if lab:\n ax.text(x1+0.012, y1+0.012 ,z1, lab1[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n else:\n index_ch = epo1.ch_names.index(ch)\n x1, y1, z1 = loc1[index_ch, :]\n ax.scatter(x1, y1, z1, marker='o', color='dimgrey')\n if lab:\n ax.text(x1+0.012, y1+0.012 ,z1, lab1[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n\n for ch in epo2.ch_names:\n if ch in bads_epo2:\n index_ch = epo2.ch_names.index(ch)\n x2, y2, z2 = loc2[index_ch, :]\n ax.scatter(x2, y2, z2, marker='x', color='dimgrey')\n if lab:\n ax.text(x2+0.012, y2+0.012 ,z2, lab2[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n else:\n index_ch = epo2.ch_names.index(ch)\n x2, y2, z2 = loc2[index_ch, :]\n ax.scatter(x2, y2, z2, marker='o', color='dimgrey')\n if lab:\n ax.text(x2+0.012, y2+0.012 ,z2, lab2[index_ch],\n horizontalalignment='center',\n verticalalignment='center')",
"async def plot_device_data(self, axes, name) -> []:\n pass",
"def plot_three(estacion,formato):\n global num_ticks\n\n if formato == 'vladi':\n ruta='/home/oscar/Doctorado/GPS/programas/python/datos_vladi/completos/'\n ns_file = ruta + 'mb_' + estacion.upper() + '_GP0.dat1'\n ew_file = ruta + 'mb_' + estacion.upper() + '_GP0.dat2'\n up_file = ruta + 'mb_' + estacion.upper() + '_GP0.dat3'\n ns_archivo=open(ns_file,'r')\n ew_archivo=open(ew_file,'r')\n up_archivo=open(up_file,'r')\n ns_datos=ns_archivo.readlines()[3:]\n ew_datos=ew_archivo.readlines()[3:]\n up_datos=up_archivo.readlines()[3:]\n ns_date=np.zeros((len(ns_datos),1))\n ns_data=np.zeros((len(ns_datos),1))\n ns_dat=np.zeros((len(ns_datos),1))\n ns_error=np.zeros((len(ns_datos),1))\n for i,lineas in enumerate(ns_datos):\n ns_date[i],ns_data[i],ns_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ns_x,ns_y = rem_mean(ns_date,ns_data,ns_error)\n ns_ticks,ns_labels = t_ticks(ns_x[0],ns_x[-1],num_ticks)\n ns_y = ns_y *1e5\n ew_date=np.zeros((len(ew_datos),1))\n ew_data=np.zeros((len(ew_datos),1))\n ew_dat=np.zeros((len(ew_datos),1))\n ew_error=np.zeros((len(ew_datos),1))\n for i,lineas in enumerate(ew_datos):\n ew_date[i],ew_data[i],ew_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ew_x,ew_y = rem_mean(ew_date,ew_data,ew_error)\n ew_ticks,ew_labels = t_ticks(ew_x[0],ew_x[-1],num_ticks)\n ew_y = ew_y *1e5\n up_date=np.zeros((len(up_datos),1))\n up_data=np.zeros((len(up_datos),1))\n up_dat=np.zeros((len(up_datos),1))\n up_error=np.zeros((len(up_datos),1))\n for i,lineas in enumerate(up_datos):\n up_date[i],up_data[i],up_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n up_x,up_y = rem_mean(up_date,up_data,up_error)\n up_ticks,up_labels = t_ticks(up_x[0],up_x[-1],num_ticks)\n up_y = up_y *1e5\n elif formato == 'sara':\n ruta = '/home/oscar/Doctorado/GPS/programas/python/datos_sara/'\n ns_file = ruta + estacion.upper() + '/lat.' + estacion.lower() + '.dat'\n ew_file = ruta + estacion.upper() + '/long.' + estacion.lower() + '.dat'\n up_file = ruta + estacion.upper() + '/height.' 
+ estacion.lower() + '.dat'\n ns_archivo=open(ns_file,'r')\n ew_archivo=open(ew_file,'r')\n up_archivo=open(up_file,'r')\n ns_datos=ns_archivo.readlines()\n ew_datos=ew_archivo.readlines()\n up_datos=up_archivo.readlines()\n ns_date=np.zeros((len(ns_datos),1))\n ns_data=np.zeros((len(ns_datos),1))\n ns_dat=np.zeros((len(ns_datos),1))\n ns_error=np.zeros((len(ns_datos),1))\n for i,lineas in enumerate(ns_datos):\n ns_date[i],ns_data[i],ns_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ns_x,ns_y = rem_mean(ns_date,ns_data,ns_error)\n ns_ticks,ns_labels = t_ticks(ns_x[0],ns_x[-1],num_ticks)\n ns_y = ns_y *1e5\n ew_date=np.zeros((len(ew_datos),1))\n ew_data=np.zeros((len(ew_datos),1))\n ew_dat=np.zeros((len(ew_datos),1))\n ew_error=np.zeros((len(ew_datos),1))\n for i,lineas in enumerate(ew_datos):\n ew_date[i],ew_data[i],ew_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ew_x,ew_y = rem_mean(ew_date,ew_data,ew_error)\n ew_ticks,ew_labels = t_ticks(ew_x[0],ew_x[-1],num_ticks)\n ew_y = ew_y *1e5\n up_date=np.zeros((len(up_datos),1))\n up_data=np.zeros((len(up_datos),1))\n up_dat=np.zeros((len(up_datos),1))\n up_error=np.zeros((len(up_datos),1))\n for i,lineas in enumerate(up_datos):\n up_date[i],up_data[i],up_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n up_x,up_y = rem_mean(up_date,up_data,up_error)\n up_ticks,up_labels = t_ticks(up_x[0],up_x[-1],num_ticks)\n up_y = up_y *1e5\n elif formato == 'cabral':\n ruta = '/home/oscar/Doctorado/GPS/programas/python/datos_enrique_cabral/'\n ns_file = ruta + 'north_' + estacion.upper()\n ew_file = ruta + 'east_' + estacion.upper()\n up_file = ruta + 'vert_' + estacion.upper()\n ns_archivo=open(ns_file,'r')\n ew_archivo=open(ew_file,'r')\n up_archivo=open(up_file,'r')\n ns_datos=ns_archivo.readlines()[1:]\n ew_datos=ew_archivo.readlines()[1:]\n up_datos=up_archivo.readlines()[1:]\n ns_date=np.zeros((len(ns_datos),1))\n ns_data=np.zeros((len(ns_datos),1))\n ns_dat=np.zeros((len(ns_datos),1))\n ns_error=np.zeros((len(ns_datos),1))\n for i,lineas in enumerate(ns_datos):\n ns_date[i],ns_data[i],ns_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[8]\n ns_x = ns_date\n ns_y = ns_data\n ns_ticks,ns_labels = t_ticks(ns_x[0],ns_x[-1],num_ticks)\n ew_date=np.zeros((len(ew_datos),1))\n ew_data=np.zeros((len(ew_datos),1))\n ew_dat=np.zeros((len(ew_datos),1))\n ew_error=np.zeros((len(ew_datos),1))\n for i,lineas in enumerate(ew_datos):\n ew_date[i],ew_data[i],ew_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[8]\n ew_x = ew_date\n ew_y = ew_data\n ew_ticks,ew_labels = t_ticks(ew_x[0],ew_x[-1],num_ticks)\n up_date=np.zeros((len(up_datos),1))\n up_data=np.zeros((len(up_datos),1))\n up_dat=np.zeros((len(up_datos),1))\n up_error=np.zeros((len(up_datos),1))\n for i,lineas in enumerate(up_datos):\n up_date[i],up_data[i],up_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[6]\n up_x = up_date\n up_y = up_data\n up_ticks,up_labels = t_ticks(up_x[0],up_x[-1],num_ticks)\n else:\n exit('[ERROR] Unrecognized format')\n\n ind = np.where(ns_x >= 2000)\n ns_x = ns_x[ind[0]]\n ns_y = ns_y[ind[0]]\n ind = np.where(ew_x >= 2000)\n ew_x = ew_x[ind[0]]\n ew_y = ew_y[ind[0]]\n ind = np.where(up_x >= 2000)\n up_x = up_x[ind[0]]\n up_y = up_y[ind[0]]\n\n plt.figure(num=None, figsize=(7, 13))\n plt.subplots_adjust(wspace=.05)\n plt.subplot(3,1,1)\n plt.grid()\n plt.plot(ns_x,ns_y,'ro',mec='green',mfc='red',mew=.5,ms=3.0,alpha=0.5)\n plt.ylabel('Milimeters')\n plt.xticks(ns_ticks,ns_labels,rotation=30)\n 
plt.xlim(ns_x[0], ns_x[-1])\n plt.title('%s - %s' % (estacion.upper(),'NS'))\n plt.subplot(3,1,2)\n plt.grid()\n plt.plot(ew_x,ew_y,'ro',mec='blue',mfc='red',mew=.5,ms=3.0,alpha=0.5)\n plt.ylabel('Milimeters')\n plt.xticks(ew_ticks,ew_labels,rotation=30)\n plt.xlim(ns_x[0], ns_x[-1])\n plt.title('%s - %s' % (estacion.upper(),'EW'))\n plt.subplot(3,1,3)\n plt.grid()\n plt.plot(up_x,up_y,'ro',mec='blue',mfc='green',mew=.5,ms=3.0,alpha=0.5)\n plt.xlabel('Years since %4.1f'% (up_date[0]))\n plt.ylabel('Milimeters')\n plt.xticks(up_ticks,up_labels,rotation=30)\n plt.xlim(ns_x[0], ns_x[-1])\n plt.title('%s - %s' % (estacion.upper(),'UP'))\n plt.subplots_adjust(bottom=0.1, top=0.95, hspace=.43)\n# plt.savefig(estacion.upper()+'_'+formato+'.jpg',dpi=300)\n plt.show()",
"def plot_data(self, filepath=None, time_min=None, time_max=None, title=None,\n electrode=None):\n\n # normalizes the samples x electrodes array containing the EEG data and\n # adds 1 to each row so that the y-axis value corresponds to electrode\n # location in the MNI coordinate (x,y,z) by electrode df containing\n # electrode locations\n\n if self.get_data().shape[0] == 1:\n nii = self.to_nii()\n nii.plot_glass_brain(pdfpath=filepath)\n elif self.get_data().empty:\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect='equal')\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()\n else:\n Y = _normalize_Y(self.data) # self.get_data()) this allows us to plot all the electrodes even the recon ones\n\n if electrode is not None:\n Y = Y.loc[:, electrode]\n if len(Y.shape) > 1:\n for i, column in enumerate(Y):\n Y[column] = Y[column] - int(column) + i\n\n # divide index by sample rate so that index corresponds to time\n if self.sample_rate:\n Y.index = np.divide(Y.index,np.mean(self.sample_rate))\n\n # if a time window is designated index data in that window\n if all([time_min, time_max]):\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y = Y[mask]\n\n # if a time window is not designated, default to the first 500 seconds\n else:\n time_min = 0\n time_max = 10\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y= Y[mask]\n \n if electrode:\n if len(Y.shape) > 1:\n ax = Y.plot(title=title, lw=.6)\n else:\n ax = Y.plot(title=title, lw=.6, color='k')\n else:\n ax = Y.plot(legend=False, title=title, color='k', lw=.6)\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()",
"def plot_data(x):\n if DATA_2D:\n plt.scatter(x[:, 0], x[:, 1])\n plt.show()\n else:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x[:, 0], x[:, 1], x[:, 2])\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()",
"def showPlot3():\n interested_in = [(20,20),(25,16),(40,10),(50,8),(80,5),(100,4)]\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(2, 1.0, item[0], item[1], 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot([1,1.56,4,6.25,16,25], proc_sim_data)\n title('Dependence of cleaning time on room shape')\n xlabel('ratio of width to height')\n ylabel('mean time (clocks)')\n show()",
"def run(self):\n # fill the x_values,y_values,z_values dictionaries\n if not self.__fillCoordinatesFromSource():\n self.raiseAWarning('Nothing to Plot Yet. Returning.')\n return\n\n self.counter += 1\n if self.counter > 1:\n self.actcm = None\n clusterDict = deepcopy(self.outStreamTypes)\n\n # start plotting.... loop over the plots that need to be included in this figure\n for pltIndex in range(len(self.outStreamTypes)):\n plotSettings = self.options['plotSettings']['plot'][pltIndex]\n if 'gridLocation' in plotSettings:\n x = None\n y = None\n if 'x' in plotSettings['gridLocation']:\n x = list(map(int, plotSettings['gridLocation']['x'].strip().split(' ')))\n else:\n x = None\n if 'y' in plotSettings['gridLocation'].keys():\n y = list(map(int, plotSettings['gridLocation']['y'].strip().split(' ')))\n else:\n y = None\n if pltIndex == 0:\n self.ax.remove() # remove axis so that there is not an extra axis on plot with subplots\n if (len(x) == 1 and len(y) == 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]], projection='3d')\n elif (len(x) == 1 and len(y) != 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]:y[-1]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]:y[-1]], projection='3d')\n elif (len(x) != 1 and len(y) == 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]], projection='3d')\n else:\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]:y[-1]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]:y[-1]], projection='3d')\n\n if 'gridSpace' in self.options['plotSettings']:\n self.ax.locator_params(axis='y', nbins=4)\n self.ax.locator_params(axis='x', nbins=2)\n if 'range' in plotSettings:\n axes_range = plotSettings['range']\n if 'ymin' in axes_range:\n self.ax.set_ylim(bottom=ast.literal_eval(axes_range['ymin']))\n if 'ymax' in axes_range:\n self.ax.set_ylim(top=ast.literal_eval(axes_range['ymax']))\n if 'xmin' in axes_range:\n self.ax.set_xlim(left=ast.literal_eval(axes_range['xmin']))\n if 'xmax' in axes_range:\n self.ax.set_xlim(right=ast.literal_eval(axes_range['xmax']))\n if self.dim == 3:\n if 'zmin' in axes_range.options['plotSettings']['plot'][pltIndex]:\n if 'zmax' not in axes_range.options['plotSettings']:\n self.raiseAWarning('zmin inputted but not zmax. zmin ignored! ')\n else:\n self.ax.set_zlim(bottom=ast.literal_eval(axes_range['zmin']), top=ast.literal_eval(self.options['plotSettings']['zmax']))\n if 'zmax' in axes_range:\n if 'zmin' not in axes_range:\n self.raiseAWarning('zmax inputted but not zmin. zmax ignored! 
')\n else:\n self.ax.set_zlim(bottom=ast.literal_eval(axes_range['zmin']), top=ast.literal_eval(axes_range['zmax']))\n if 'xlabel' not in plotSettings:\n self.ax.set_xlabel('x')\n else:\n self.ax.set_xlabel(plotSettings['xlabel'])\n if 'ylabel' not in plotSettings:\n self.ax.set_ylabel('y')\n else:\n self.ax.set_ylabel(plotSettings['ylabel'])\n if 'zlabel' in plotSettings:\n if self.dim == 2:\n self.raiseAWarning('zlabel keyword does not make sense in 2-D Plots!')\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel(plotSettings['zlabel'])\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel('z')\n else:\n if 'xlabel' not in self.options['plotSettings']:\n self.ax.set_xlabel('x')\n else:\n self.ax.set_xlabel(self.options['plotSettings']['xlabel'])\n if 'ylabel' not in self.options['plotSettings']:\n self.ax.set_ylabel('y')\n else:\n self.ax.set_ylabel(self.options['plotSettings']['ylabel'])\n if 'zlabel' in self.options['plotSettings']:\n if self.dim == 2:\n self.raiseAWarning('zlabel keyword does not make sense in 2-D Plots!')\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel(self.options['plotSettings']['zlabel'])\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel('z')\n\n if 'legend' in self.options['plotSettings']:\n if 'label' not in plotSettings.get('attributes', {}):\n if 'attributes' not in plotSettings:\n plotSettings['attributes'] = {}\n plotSettings['attributes']['label'] = self.outStreamTypes[pltIndex] + ' ' + str(pltIndex)\n #################\n # SCATTER PLOT #\n #################\n self.raiseADebug(f'creating plot {self.name}')\n if self.outStreamTypes[pltIndex] == 'scatter':\n if 's' not in plotSettings:\n plotSettings['s'] = '20'\n if 'c' not in plotSettings:\n plotSettings['c'] = 'b'\n if 'marker' not in plotSettings:\n plotSettings['marker'] = 'o'\n if 'alpha' not in plotSettings:\n plotSettings['alpha'] = 'None'\n if 'linewidths' not in plotSettings:\n plotSettings['linewidths'] = 'None'\n if self.colorMapCoordinates[pltIndex] is not None:\n # Find the max and min colormap values\n firstKey = utils.first(self.xValues[pltIndex].keys())\n vmin = np.amin(self.colorMapValues[pltIndex][firstKey])\n vmax = np.amax(self.colorMapValues[pltIndex][firstKey])\n for key in self.xValues[pltIndex]:\n vmin = min(vmin,np.amin(self.colorMapValues[pltIndex][key]))\n vmax = max(vmax,np.amax(self.colorMapValues[pltIndex][key]))\n plotSettings['norm'] = matplotlib.colors.Normalize(vmin,vmax)\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n scatterPlotOptions = {'s': ast.literal_eval(plotSettings['s']),\n 'marker': (plotSettings['marker']),\n 'alpha': ast.literal_eval(plotSettings['alpha']),\n 'linewidths': ast.literal_eval(plotSettings['linewidths'])}\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['norm'] = plotSettings['norm']\n scatterPlotOptions.update(plotSettings.get('attributes', {}))\n if self.dim == 2:\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['c'] = self.colorMapValues[pltIndex][key][xIndex]\n scatterPlotOptions['cmap'] = matplotlib.cm.get_cmap(\"winter\")\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if 
first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n try:\n self.actcm.draw_all()\n # this is not good, what exception will be thrown?\n except:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n scatterPlotOptions['cmap'] = plotSettings['cmap']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m, ax=self.ax)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if 'color' not in scatterPlotOptions:\n scatterPlotOptions['c'] = plotSettings['c']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['c'] = self.colorMapValues[pltIndex][key][zIndex]\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n scatterPlotOptions['cmap'] = plotSettings['cmap']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if 'color' not in scatterPlotOptions:\n scatterPlotOptions['c'] = plotSettings['c']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], 
self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n #################\n # LINE PLOT #\n #################\n elif self.outStreamTypes[pltIndex] == 'line':\n minV = 0\n maxV = 0\n # If the user does not define an appropriate cmap, then use matplotlib's default.\n if 'cmap' not in plotSettings or plotSettings['cmap'] not in matplotlib.cm.datad:\n plotSettings['cmap'] = None\n if bool(self.colorMapValues):\n for key in self.xValues[pltIndex]:\n minV = min(minV,self.colorMapValues[pltIndex][key][-1][-1])\n maxV = max(maxV,self.colorMapValues[pltIndex][key][-1][-1])\n cmap = matplotlib.cm.ScalarMappable(matplotlib.colors.Normalize(minV, maxV, True), plotSettings['cmap'])\n cmap.set_array([minV,maxV])\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n if self.colorMapCoordinates[pltIndex] is not None:\n plotSettings['interpPointsX'] = str(max(200, len(self.xValues[pltIndex][key][xIndex])))\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.dim == 2:\n if self.yValues[pltIndex][key][yIndex].size < 2:\n return\n xi, yi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], plotSettings, returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n self.ax.plot(xi, yi, c=cmap.cmap(self.colorMapValues[pltIndex][key][-1][-1]/(maxV-minV)))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if self.actcm is None:\n self.actcm = self.fig.colorbar(cmap)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n self.actPlot = self.ax.plot(xi, yi, **plotSettings.get('attributes', {}))\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n self.ax.plot(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n self.zValues[pltIndex][key][zIndex],\n c=cmap.cmap(self.colorMapValues[pltIndex][key][-1][-1]/(maxV-minV)))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if self.actcm is None:\n self.actcm = self.fig.colorbar(cmap)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n self.actPlot = self.ax.plot(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n self.zValues[pltIndex][key][zIndex],\n **plotSettings.get('attributes', {}))\n ##################\n # HISTOGRAM PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'histogram':\n if 'bins' not in plotSettings:\n if self.dim == 2:\n plotSettings['bins'] = '10'\n else:\n plotSettings['bins'] = '4'\n if 'normed' not in plotSettings:\n plotSettings['normed'] = 'False'\n if 'weights' not in plotSettings:\n plotSettings['weights'] = 'None'\n if 'cumulative' not in plotSettings:\n plotSettings['cumulative'] = 'False'\n if 'histtype' not in plotSettings:\n plotSettings['histtype'] = 'bar'\n if 'align' not in plotSettings:\n plotSettings['align'] = 'mid'\n if 'orientation' not in plotSettings:\n plotSettings['orientation'] = 'vertical'\n if 'rwidth' not in plotSettings:\n plotSettings['rwidth'] = 'None'\n if 'log' not in plotSettings:\n plotSettings['log'] = 'None'\n if 'color' not in plotSettings:\n plotSettings['color'] = 'b'\n if 'stacked' not in plotSettings:\n plotSettings['stacked'] = 'None'\n if 
self.sourceData[0].type.strip() == 'HistorySet':\n #####################################################################################################################################\n # @MANDD: This 'if' condition has been added in order to allow the user the correctly create an histogram out of an historySet #\n # If the histogram is created out of the input variables, then the plot has an identical meaning of the one generated by a pointSet #\n # However, if the histogram is created out of the output variables, then the plot consider only the last value of the array #\n #####################################################################################################################################\n data = {}\n data['x'] = np.empty(0)\n data['y'] = np.empty(0)\n for index in range(len(self.outStreamTypes)):\n for key in self.xValues[index]:\n data['x'] = np.append(data['x'], self.xValues[index][key][0][-1])\n if self.dim == 3:\n data['y'] = np.append(data['y'], self.yValues[index][key][0][-1])\n del self.xValues[index]\n self.xValues = {}\n self.xValues[index] = {}\n self.xValues[index][0] = []\n self.xValues[index][0].append(deepcopy(data['x']))\n if self.dim == 3:\n del self.yValues[index]\n self.yValues = {}\n self.yValues[index] ={ }\n self.yValues[index][0] = []\n self.yValues[index][0].append(deepcopy(data['y']))\n\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n try:\n colorss = ast.literal_eval(plotSettings['color'])\n # unknown what specific error is anticipated here, but I don't like a bare except...\n # ast.literal_eval can raise the exceptions listed below (see library docs):\n except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):\n colorss = plotSettings['color']\n if self.dim == 2:\n self.ax.hist(self.xValues[pltIndex][key][xIndex],\n bins=ast.literal_eval(plotSettings['bins']),\n density=ast.literal_eval(plotSettings['normed']),\n weights=ast.literal_eval(plotSettings['weights']),\n cumulative=ast.literal_eval(plotSettings['cumulative']),\n histtype=plotSettings['histtype'],\n align=plotSettings['align'],\n orientation=plotSettings['orientation'],\n rwidth=ast.literal_eval(plotSettings['rwidth']),\n log=ast.literal_eval(plotSettings['log']),\n color=colorss,\n stacked=ast.literal_eval(plotSettings['stacked']),\n **plotSettings.get('attributes', {}))\n else:\n for yIndex in range(len(self.yValues[pltIndex][key])):\n hist, xedges, yedges = np.histogram2d(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n bins=ast.literal_eval(plotSettings['bins']))\n elements = (len(xedges) - 1) * (len(yedges) - 1)\n if 'x_offset' in plotSettings:\n xoffset = float(plotSettings['x_offset'])\n else:\n xoffset = 0.25\n if 'y_offset' in plotSettings:\n yoffset = float(plotSettings['y_offset'])\n else:\n yoffset = 0.25\n if 'dx' in plotSettings:\n dxs = float(plotSettings['dx'])\n else:\n dxs = (self.xValues[pltIndex][key][xIndex].max() - self.xValues[pltIndex][key][xIndex].min()) / float(plotSettings['bins'])\n if 'dy' in plotSettings:\n dys = float(plotSettings['dy'])\n else:\n dys = (self.yValues[pltIndex][key][yIndex].max() - self.yValues[pltIndex][key][yIndex].min()) / float(plotSettings['bins'])\n xpos, ypos = np.meshgrid(xedges[:-1] + xoffset, yedges[:-1] + yoffset)\n self.actPlot = self.ax.bar3d(xpos.flatten(),\n ypos.flatten(),\n np.zeros(elements),\n dxs*np.ones_like(elements),\n dys*np.ones_like(elements),\n hist.flatten(),\n color=colorss,\n zsort='average',\n 
**plotSettings.get('attributes', {}))\n ##################\n # STEM PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'stem':\n if 'linefmt' not in plotSettings:\n plotSettings['linefmt'] = 'b-'\n if 'markerfmt' not in plotSettings:\n plotSettings['markerfmt'] = 'bo'\n if 'basefmt' not in plotSettings:\n plotSettings['basefmt'] = 'r-'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.dim == 2:\n self.actPlot = self.ax.stem(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n linefmt=plotSettings['linefmt'],\n markerfmt=plotSettings['markerfmt'],\n basefmt = plotSettings['linefmt'],\n use_line_collection=True,\n **plotSettings.get('attributes', {}))\n else:\n # it is a basic stem plot constructed using a standard line plot. For now we do not use the previous defined keywords...\n for zIndex in range(len(self.zValues[pltIndex][key])):\n for xx, yy, zz in zip(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex]):\n self.ax.plot([xx, xx], [yy, yy], [0, zz], '-')\n ##################\n # STEP PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'step':\n if self.dim == 2:\n if 'where' not in plotSettings:\n plotSettings['where'] = 'mid'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n if self.xValues[pltIndex][key][xIndex].size < 2:\n xi = self.xValues[pltIndex][key][xIndex]\n else:\n xi = np.linspace(self.xValues[pltIndex][key][xIndex].min(), self.xValues[pltIndex][key][xIndex].max(), ast.literal_eval(plotSettings['interpPointsX']))\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.yValues[pltIndex][key][yIndex].size <= 3:\n return\n yi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], plotSettings)\n self.actPlot = self.ax.step(xi, yi, where=plotSettings['where'], **plotSettings.get('attributes', {}))\n else:\n self.raiseAWarning('step Plot not available in 3D')\n return\n ########################\n # PSEUDOCOLOR PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'pseudocolor':\n if self.dim == 2:\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if not self.colorMapCoordinates:\n self.raiseAMessage('pseudocolor Plot needs coordinates for color map... 
Returning without plotting')\n return\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.colorMapValues[pltIndex][key][zIndex].size <= 3:\n return\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.pcolormesh(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n **plotSettings.get('attributes', {}))\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n else:\n self.actPlot = self.ax.pcolormesh(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(ma.masked_where(np.isnan(Ci), Ci))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n actcm = self.fig.colorbar(m)\n actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.raiseAWarning('pseudocolor Plot is considered a 2D plot, not a 3D!')\n return\n ########################\n # SURFACE PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'surface':\n if self.dim == 2:\n self.raiseAWarning('surface Plot is NOT available for 2D plots, IT IS A 3D!')\n return\n else:\n if 'rstride' not in plotSettings:\n plotSettings['rstride'] = '1'\n if 'cstride' not in plotSettings:\n plotSettings['cstride'] = '1'\n if 'antialiased' not in plotSettings:\n plotSettings['antialiased'] = 'False'\n if 'linewidth' not in plotSettings:\n plotSettings['linewidth'] = '0'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n xig, yig, zi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.zValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n facecolors=matplotlib.cm.get_cmap(name=plotSettings['cmap'])(ma.masked_where(np.isnan(Ci), Ci)),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n if first:\n self.actPlot.cmap = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n if 'color' in plotSettings.get('attributes', {}):\n self.actPlot.set_color = plotSettings.get('attributes', {})['color']\n else:\n self.actPlot.set_color = 'blue'\n else:\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n ########################\n # TRI-SURFACE PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'tri-surface':\n if self.dim == 2:\n self.raiseAWarning('TRI-surface Plot is NOT available for 2D plots, it is 3D!')\n return\n else:\n if 'color' not in plotSettings:\n plotSettings['color'] = 'b'\n if 'shade' not in plotSettings:\n plotSettings['shade'] = 'False'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, 
so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n metric = (self.xValues[pltIndex][key][xIndex] ** 2 + self.yValues[pltIndex][key][yIndex] ** 2) ** 0.5\n metricIndeces = np.argsort(metric)\n xs = np.zeros(self.xValues[pltIndex][key][xIndex].shape)\n ys = np.zeros(self.yValues[pltIndex][key][yIndex].shape)\n zs = np.zeros(self.zValues[pltIndex][key][zIndex].shape)\n for sindex in range(len(metricIndeces)):\n xs[sindex] = self.xValues[pltIndex][key][xIndex][metricIndeces[sindex]]\n ys[sindex] = self.yValues[pltIndex][key][yIndex][metricIndeces[sindex]]\n zs[sindex] = self.zValues[pltIndex][key][zIndex][metricIndeces[sindex]]\n surfacePlotOptions = {'color': plotSettings['color'],\n 'shade': ast.literal_eval(plotSettings['shade'])}\n surfacePlotOptions.update(plotSettings.get('attributes', {}))\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n surfacePlotOptions['cmap'] = matplotlib.cm.get_cmap(name = plotSettings['cmap'])\n self.actPlot = self.ax.plot_trisurf(xs, ys, zs, **surfacePlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actPlot.cmap = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] != 'None':\n surfacePlotOptions[\"cmap\"] = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n self.actPlot = self.ax.plot_trisurf(xs, ys, zs, **surfacePlotOptions)\n ########################\n # WIREFRAME PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'wireframe':\n if self.dim == 2:\n self.raiseAWarning('wireframe Plot is NOT available for 2D plots, IT IS A 3D!')\n return\n else:\n if 'rstride' not in plotSettings:\n plotSettings['rstride'] = '1'\n if 'cstride' not in plotSettings:\n plotSettings['cstride'] = '1'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n xig, yig, zi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.zValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n self.raiseAWarning(f'Currently, ax.plot_wireframe() in MatPlotLib version: {matplotlib.__version__} does not support a colormap! Wireframe plotted on a surface plot...')\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n alpha=0.4,\n rstride=ast.literal_eval(plotSettings['rstride']),\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n if 'color' in plotSettings.get('attributes', {}):\n self.actPlot.set_color = plotSettings.get('attributes', {})['color']\n else:\n self.actPlot.set_color = 'blue'\n else:\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n ########################\n # CONTOUR PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'contour' or self.outStreamTypes[pltIndex] == 'filledContour':\n if self.dim == 2:\n if 'numberBins' in plotSettings:\n nbins = int(plotSettings['numberBins'])\n else:\n nbins = 5\n for key in self.xValues[pltIndex]:\n if not self.colorMapCoordinates:\n self.raiseAWarning(self.outStreamTypes[pltIndex] + ' Plot needs coordinates for color map... 
Returning without plotting')\n return\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.actcm:\n first = False\n else:\n first = True\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.outStreamTypes[pltIndex] == 'contour':\n if plotSettings['cmap'] == 'None':\n if 'color' in plotSettings.get('attributes', {}):\n color = plotSettings.get('attributes', {})['color']\n else:\n color = 'blue'\n self.actPlot = self.ax.contour(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n colors=color,\n **plotSettings.get('attributes', {}))\n else:\n self.actPlot = self.ax.contour(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n **plotSettings.get('attributes', {}))\n else:\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.contourf(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n **plotSettings.get('attributes', {}))\n self.ax.clabel(self.actPlot, inline=1, fontsize=10)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actcm = self.fig.colorbar(self.actPlot, shrink=0.8, extend='both')\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax = max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n self.raiseAWarning('contour/filledContour is a 2-D plot, where x,y are the surface coordinates and colorMap vector is the array to visualize!\\n contour3D/filledContour3D are 3-D! ')\n return\n # These should be combined: ^^^ & vvv\n elif self.outStreamTypes[pltIndex] == 'contour3D' or self.outStreamTypes[pltIndex] == 'filledContour3D':\n if self.dim == 2:\n self.raiseAWarning('contour3D/filledContour3D Plot is NOT available for 2D plots, IT IS A 2D! Check \"contour/filledContour\"!')\n return\n else:\n if 'numberBins' in plotSettings:\n nbins = int(plotSettings['numberBins'])\n else:\n nbins = 5\n if 'extend3D' in plotSettings:\n ext3D = bool(plotSettings['extend3D'])\n else:\n ext3D = False\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.actcm:\n first = False\n else:\n first = True\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.outStreamTypes[pltIndex] == 'contour3D':\n if plotSettings['cmap'] == 'None':\n if 'color' in plotSettings.get('attributes', {}):\n color = plotSettings.get('attributes', {})['color']\n else:\n color = 'blue'\n self.actPlot = self.ax.contour3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n colors=color,\n extend3d=ext3D,\n **plotSettings.get('attributes', {}))\n else:\n self.actPlot = self.ax.contour3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n extend3d=ext3D,\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n else:\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.contourf3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n self.ax.clabel(self.actPlot, inline=1, fontsize=10)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actcm = self.fig.colorbar(self.actPlot, shrink = 0.8, extend = 'both')\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax = max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n ########################\n # DataMining PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'dataMining':\n colors = cycle(['#88CCEE', '#DDCC77', '#AA4499', '#117733', '#332288', '#999933', '#44AA99', '#882255', '#CC6677', '#CD6677', '#DC6877', '#886677', '#AA6677', '#556677', '#CD7865'])\n if 's' not in plotSettings:\n plotSettings['s'] = '20'\n if 'c' not in plotSettings:\n plotSettings['c'] = 'b'\n if 'marker' not in plotSettings:\n plotSettings['marker'] = 'o'\n if 'alpha' not in plotSettings:\n plotSettings['alpha'] = 'None'\n if 'linewidths' not in plotSettings:\n plotSettings['linewidths'] = 'None'\n clusterDict[pltIndex] = {}\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n dataMiningPlotOptions = {'s': ast.literal_eval(plotSettings['s']),\n 'marker': (plotSettings['marker']),\n 'alpha': ast.literal_eval(plotSettings['alpha']),\n 'linewidths': ast.literal_eval(plotSettings['linewidths'])}\n if self.colorMapCoordinates[pltIndex] is not None:\n self.raiseAWarning('ColorMap values supplied, however DataMining plots do not use colorMap from input.')\n if plotSettings['cmap'] == 'None':\n self.raiseAWarning('ColorSet supplied, however DataMining plots do not use color set from input.')\n if 'cluster' == plotSettings['SKLtype']:\n # TODO: include the cluster Centers to the plot\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = 
np.amax(self.clusterValues[pltIndex][1][0]) + 1\n dataMiningPlotOptions.update(plotSettings.get('attributes', {}))\n if self.dim == 2:\n clusterDict[pltIndex]['clusterValues'] = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n clusterDict[pltIndex]['clusterValues'] = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 3))\n clusterDict[pltIndex]['clusterValues'][:, 0] = self.xValues[pltIndex][key][xIndex]\n clusterDict[pltIndex]['clusterValues'][:, 1] = self.yValues[pltIndex][key][yIndex]\n if self.dim == 2:\n for k, col in zip(range(int(clusterDict[pltIndex]['noClusters'])), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n\n # Handle all of the outlying data\n myMembers = self.clusterValues[pltIndex][1][0] == -1\n # resize the points\n dataMiningPlotOptions['s'] /= 2\n # and hollow out their markers\n if 'facecolors' in dataMiningPlotOptions:\n faceColors = dataMiningPlotOptions['facecolors']\n else:\n faceColors = None\n dataMiningPlotOptions['facecolors'] = 'none'\n\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n color='#000000',\n **dataMiningPlotOptions)\n\n # Restore the plot options to their original values\n dataMiningPlotOptions['s'] *= 2\n if faceColors is not None:\n dataMiningPlotOptions['facecolors'] = faceColors\n else:\n del dataMiningPlotOptions['facecolors']\n\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n clusterDict[pltIndex]['clusterValues'][:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(int(clusterDict[pltIndex]['noClusters'])), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n clusterDict[pltIndex]['clusterValues'][myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n\n # Handle all of the outlying data\n myMembers = self.clusterValues[pltIndex][1][0] == -1\n # resize the points\n dataMiningPlotOptions['s'] /= 2\n # and hollow out their markers\n if 'facecolors' in dataMiningPlotOptions:\n faceColors = dataMiningPlotOptions['facecolors']\n else:\n faceColors = None\n dataMiningPlotOptions['facecolors'] = 'none'\n\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n clusterDict[pltIndex]['clusterValues'][myMembers, 2],\n color='#000000',\n **dataMiningPlotOptions)\n\n # Restore the plot options to their original values\n dataMiningPlotOptions['s'] *= 2\n if faceColors is not None:\n dataMiningPlotOptions['facecolors'] = faceColors\n else:\n del dataMiningPlotOptions['facecolors']\n\n elif 'bicluster' == plotSettings['SKLtype']:\n self.raiseAnError(IOError, 'SKLType Bi-Cluster Plots are not implemented yet!..')\n elif 'mixture' == plotSettings['SKLtype']:\n if 'noMixtures' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noMixtures'] = int(plotSettings.get('attributes', {})['noMixtures'])\n plotSettings.get('attributes', {}).pop('noMixtures')\n else:\n clusterDict[pltIndex]['noMixtures'] = np.amax(self.mixtureValues[pltIndex][1][0]) + 1\n if self.dim == 3:\n self.raiseAnError(IOError, 'SKLType Mixture Plots are only available in 2-Dimensions')\n 
else:\n clusterDict[pltIndex]['mixtureValues'] = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 2))\n clusterDict[pltIndex]['mixtureValues'][:, 0] = self.xValues[pltIndex][key][xIndex]\n clusterDict[pltIndex]['mixtureValues'][:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'mixtureCovars' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('mixtureCovars', (pltIndex, 0))\n # mixtureCovars = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('mixtureCovars')\n # else:\n # mixtureCovars = None\n if 'mixtureMeans' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('mixtureMeans', (pltIndex, 0))\n # mixtureMeans = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('mixtureMeans')\n # else:\n # mixtureMeans = None\n # mixtureCovars.reshape(3, 4)\n # mixtureMeans.reshape(3, 4)\n # for i, (mean, covar, col) in enumerate(zip(mixtureMeans, mixtureCovars, colors)):\n for i, col in zip(range(clusterDict[pltIndex]['noMixtures']), colors):\n if not np.any(self.mixtureValues[pltIndex][1][0] == i):\n continue\n myMembers = self.mixtureValues[pltIndex][1][0] == i\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['mixtureValues'][myMembers, 0],\n clusterDict[pltIndex]['mixtureValues'][myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n elif 'manifold' == plotSettings['SKLtype']:\n if self.dim == 2:\n manifoldValues = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n manifoldValues = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 3))\n manifoldValues[:, 0] = self.xValues[pltIndex][key][xIndex]\n manifoldValues[:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'clusterLabels' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('clusterLabels', (pltIndex, 0))\n clusterDict[pltIndex]['clusterLabels'] = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('clusterLabels')\n else:\n clusterDict[pltIndex]['clusterLabels'] = None\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = np.amax(self.clusterValues[pltIndex][1][0]) + 1\n if self.clusterValues[pltIndex][1][0] is not None:\n if self.dim == 2:\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(manifoldValues[myMembers, 0],\n manifoldValues[myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n manifoldValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(manifoldValues[myMembers, 0],\n manifoldValues[myMembers, 1],\n manifoldValues[myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n else:\n if self.dim == 2:\n self.actPlot = self.ax.scatter(manifoldValues[:, 0],\n manifoldValues[:, 1],\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n manifoldValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n self.actPlot = self.ax.scatter(manifoldValues[:, 0],\n manifoldValues[:, 1],\n 
manifoldValues[:, 2],\n **dataMiningPlotOptions)\n elif 'decomposition' == plotSettings['SKLtype']:\n if self.dim == 2:\n decompositionValues = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n decompositionValues = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 3))\n decompositionValues[:, 0] = self.xValues[pltIndex][key][xIndex]\n decompositionValues[:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = np.amax(self.clusterValues[pltIndex][1][0]) + 1\n if self.clusterValues[pltIndex][1][0] is not None:\n if self.dim == 2:\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(decompositionValues[myMembers, 0],\n decompositionValues[myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n decompositionValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(decompositionValues[myMembers, 0],\n decompositionValues[myMembers, 1],\n decompositionValues[myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n else:\n # no ClusterLabels\n if self.dim == 2:\n self.actPlot = self.ax.scatter(decompositionValues[:, 0],\n decompositionValues[:, 1],\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n decompositionValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n self.actPlot = self.ax.scatter(decompositionValues[:, 0],\n decompositionValues[:, 1],\n decompositionValues[:, 2],\n **dataMiningPlotOptions)\n else:\n # Let's try to \"write\" the code for the plot on the fly\n self.raiseAWarning('Trying to create a non-predefined plot of type ' + self.outStreamTypes[pltIndex] + '. If this fails, please refer to the and/or the related matplotlib method specification.')\n kwargs = {}\n for kk in plotSettings:\n if kk != 'attributes' and kk != self.outStreamTypes[pltIndex]:\n try:\n kwargs[kk] = ast.literal_eval(plotSettings[kk])\n except ValueError:\n kwargs[kk] = plotSettings[kk]\n try:\n if self.dim == 2:\n customFunctionCall = getattr(self.ax, self.outStreamTypes[pltIndex])\n else:\n customFunctionCall = getattr(self.ax, self.outStreamTypes[pltIndex])\n self.actPlot = customFunctionCall(**kwargs)\n except AttributeError as ae:\n self.raiseAnError(RuntimeError, '<' + str(ae) + '> -> in execution custom plot \"' + self.outStreamTypes[pltIndex] + '\" in Plot ' + self.name + '.\\nSTREAM MANAGER: ERROR -> command has been called in the following way: ' + 'ax.' 
+ self.outStreamTypes[pltIndex])\n\n if 'legend' in self.options['plotSettings']:\n self.fig.legend(**self.options['plotSettings']['legend'])\n\n # SHOW THE PICTURE\n self.__executeActions()\n self.fig.canvas.draw_idle()\n\n if 'screen' in self.destinations and display:\n def handle_close(event):\n \"\"\"\n This method is aimed to handle the closing of figures (overall when in interactive mode)\n @ In, event, instance, the event to close\n @ Out, None\n \"\"\"\n self.fig.canvas.stop_event_loop()\n self.raiseAMessage('Closed Figure')\n self.fig.canvas.mpl_connect('close_event', handle_close)\n # self.plt.pause(1e-6)\n # The following code is extracted from pyplot.pause without actually\n # needing to force the code to sleep, according to MPL's documentation,\n # this feature is experimental, hopefully by not calling the pause\n # function, we can obtain consistent results.\n # We are skipping a few of the sanity checks done in that function,\n # since we are sure we have an interactive backend and access to the\n # correct type of canvas and figure.\n self.fig.canvas.draw()\n # If your graphs are unresponsive to user input, you may want to consider\n # adjusting this timeout, to allow more time for the input to be handled.\n self.fig.canvas.start_event_loop(1e-3)\n\n # self.fig.canvas.flush_events()\n\n for fileType in self.destinations:\n if fileType == 'screen':\n continue\n\n if not self.overwrite:\n prefix = str(self.counter) + '-'\n else:\n prefix = ''\n\n if len(self.filename) > 0:\n name = self.filename\n else:\n name = prefix + self.name + '_' + str(self.outStreamTypes).replace(\"'\", \"\").replace(\"[\", \"\").replace(\"]\", \"\").replace(\",\", \"-\").replace(\" \", \"\")\n\n if self.subDirectory is not None:\n name = os.path.join(self.subDirectory,name)\n\n self.fig.savefig(name + '.' + fileType, format=fileType)\n\n if 'screen' not in self.destinations:\n plt.close(fig=self.fig)\n\n gc.collect()",
"def plot_data(self):",
"def plot_control_loops(data):\n plot_attitude_rate_loops(data)\n plot_attitude_loops(data)\n plot_velocity_loops(data)\n plot_position_loops(data)",
"def show_data(self):\n\n self.area_canvas.axes.cla()\n self.draw_scatterplot(self.scatter_canvas, 'x [µm]', 'y [µm]', self.p_inputs['flip y-axis'].isChecked())\n self.draw_hist(self.area_canvas, 'area', 'cluster area [µm²]', 'number of clusters')\n self.draw_hist(self.number_canvas, 'nclusters', 'number of cluster', 'number of regions')\n self.draw_hist(self.density_canvas, 'density', 'cluster density [µm⁻²]', 'number of clusters')\n self.draw_hist(self.percentage_canvas, 'pclustered', 'percentage clustered',\n 'number of regions')\n self.draw_hist(self.ratio_canvas, 'reldensity', 'relative density clusters/background',\n 'number of regions')",
"def plot_arias(stream, axes=None, axis_index=None,\n figsize=None, file=None, minfontsize=14, show=False,\n show_maximum=True, title=None, xlabel=None, ylabel=None):\n if len(stream) < 1:\n raise Exception('No traces contained within the provided stream.')\n\n stream = get_acceleration(stream, units='m/s/s')\n Ia = calculate_arias(stream, ['channels'], True)[0]\n\n starttime = stream[0].stats.starttime\n if title is None:\n title = 'Event on ' + str(starttime.month) + '/' + str(starttime.day) + '/' + \\\n str(starttime.year)\n if xlabel is None:\n xlabel = 'Time (s)'\n if ylabel is None:\n ylabel = 'Ia (m/s)'\n\n if figsize is None:\n figsize = (6.5, 7.5)\n if axes is None:\n fig, axs = plt.subplots(len(Ia), 1, figsize=figsize)\n axis_numbers = np.linspace(0, len(Ia) - 1, len(Ia))\n elif axis_index is not None:\n axs = axes\n axis_numbers = np.linspace(axis_index, axis_index + len(Ia) - 1, len(Ia))\n for idx, trace in zip(axis_numbers.astype(int), Ia):\n ax = axs[idx]\n dt = trace.stats['delta']\n npts = len(trace.data)\n t = np.linspace(0, (npts-1)*dt, num=npts)\n network = trace.stats['network']\n station = trace.stats['station']\n channel = trace.stats['channel']\n trace_label = network + '.' + station + '.' + channel\n ax.set_title(trace_label, fontsize=minfontsize)\n ax.plot(t, trace.data)\n if show_maximum:\n abs_arr = np.abs(trace.data.copy())\n idx = np.argmax(abs_arr)\n max_value = abs_arr[idx]\n ax.plot([t[idx]], [trace.data[idx]], marker='o', color=\"red\")\n ax.annotate('%.2E' % max_value, (t[idx], trace.data[idx]),\n xycoords='data', xytext=(.85, 0.25),\n textcoords='axes fraction',\n arrowprops=dict(facecolor='black',\n shrink=0.05, width=1, headwidth=4),\n horizontalalignment='right', verticalalignment='top')\n ax.set_xlabel(xlabel, fontsize=minfontsize)\n ax.set_ylabel(ylabel, fontsize=minfontsize)\n ax.xaxis.set_tick_params(labelsize=minfontsize - 2)\n ax.yaxis.set_tick_params(labelsize=minfontsize - 2)\n plt.suptitle(title, y=1.01, fontsize=minfontsize + 4)\n plt.tight_layout()\n if show and axes is None:\n plt.show()\n if file is not None and axes is None:\n fig.savefig(file, format='png')\n return axs",
"def plot_data(array_list, params):\n\tkey = array_list[0]\n\tvals = array_list[1]\n\tprint key\n\tprint len(vals)\n\tfigure_id = 1\n\tfor item in params:\n\t\tx_axis = get_by_keys(item['x_keys'], key, vals)\n\t\ty_axis = get_by_keys(item['y_keys'], key, vals)\n\t\tplt.figure(figure_id)\n\t\tplt.plot(x_axis, y_axis)\n\t\tfigure_id = figure_id + 1\n\tplt.show()",
"def generate_plots(self):\n freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}\n data_axes = None\n for index, frequency in enumerate(sorted(freq_to_channel)):\n channel = freq_to_channel[frequency]\n td_f = self.frequency_dict[channel]\n title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)\n data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,\n self.min_db, self.max_db)\n\n if data_axes:\n self._display_x_labels(self.ax[2], self.data_times)\n self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])\n self._display_colorbar(self.fig, data_axes)",
"def plot(axes, axis, values, c='chartreuse'):\n a = axes[axis]\n a.set_xlabel('time (s)')\n x = np.array(range(len(values))) / 1000\n dim = 'x' if axis == 0 else 'y' if axis == 1 else 'z'\n a.set_title('-'.join([dim, 'acceleration']))\n a.plot(x, values / 1000, c=c)"
] | [
"0.6957092",
"0.6196753",
"0.6050141",
"0.6022858",
"0.5895816",
"0.5823652",
"0.57994175",
"0.57699615",
"0.57496446",
"0.572262",
"0.5720663",
"0.56938666",
"0.5690304",
"0.56666124",
"0.56643283",
"0.56529903",
"0.5643261",
"0.5638599",
"0.56349975",
"0.56215554",
"0.56083584",
"0.5597804",
"0.5593497",
"0.5574915",
"0.55743456",
"0.5560262",
"0.55236334",
"0.55095035",
"0.55036896",
"0.5503577"
] | 0.7285975 | 0 |
Plots 3-Axis accelerometer data on separate graphs per sensor but displays them all in one figure. Takes a numpy array of 3-Axis accelerometer data of the form [X1,Y1,Z1,Time,X2,Y2,Z2,Time.....XN,YN,ZN,Time] with any number of rows corresponding to the number of samples for each sensor, and N defined by the NO_SENSORS (int) parameter. A numpy array of dimension [n][N*4] should therefore be provided via the data (numpy array) parameter. The dataSelection parameter should be 0 or 1 and sets the Y axis to either 0-1024 (raw ADC counts) or -3 to 3 (g) respectively. | def plot_singlefig(data,NO_SENSORS,dataSelection):
    # Y-axis limits per dataSelection: 0 -> raw ADC counts (0-1024), 1 -> acceleration in g (-3 to 3)
yAxisLimits = [[0,1024],[-3,3]]
# Plots graphs for each sensor on 1 figure
plt.figure(1)
for i in range(0,NO_SENSORS):
        # The figure is separated into subplots using the three-digit position code: 231 means 2 rows, 3 columns, subplot 1
plt.subplot(231 + i)
plt.title('Sensor ' + str(i + 1))
plt.plot(data[:,(3 + (4 * i))],data[:,(0 + (4 * i))],label='X Axis')
plt.plot(data[:,(3 + (4 * i))],data[:,(1 + (4 * i))],label='Y Axis')
plt.plot(data[:,(3 + (4 * i))],data[:,(2 + (4 * i))],label='Z Axis')
plt.ylim(yAxisLimits[dataSelection][0],yAxisLimits[dataSelection][1])
plt.xlabel('Time/s')
plt.ylabel('Acceleration/g')
plt.legend()
plt.show() | {
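A minimal usage sketch for the function above (not part of the original record): it assumes numpy and matplotlib are importable alongside plot_singlefig, and fabricates random ADC-range samples purely for illustration; the array layout follows the [X, Y, Z, Time] per-sensor column order described in the query.

import numpy as np
import matplotlib.pyplot as plt

NO_SENSORS = 2                       # N sensors -> N * 4 columns
nSamples = 100
t = np.linspace(0.0, 1.0, nSamples)  # shared time base in seconds

columns = []
for s in range(NO_SENSORS):
    # X, Y, Z columns as raw ADC counts (0-1023), then the time column
    columns.append(np.random.randint(0, 1024, nSamples))
    columns.append(np.random.randint(0, 1024, nSamples))
    columns.append(np.random.randint(0, 1024, nSamples))
    columns.append(t)
data = np.column_stack(columns)      # shape (nSamples, NO_SENSORS * 4)

plot_singlefig(data, NO_SENSORS, dataSelection=0)  # 0 -> 0-1024 ADC y-axis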
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_multifig(data,NO_SENSORS,dataSelection):\n \n # Axis options\n yAxisLimits = [[0,1024],[-3,3]]\n \n # Plots a seperate graph for each sensor\n for i in range(0,NO_SENSORS):\n plt.figure(i + 1)\n plt.title('Sensor ' + str(i + 1))\n plt.plot(data[:,(3 + (4 * i))],data[:,(0 + (4 * i))],label='X Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(1 + (4 * i))],label='Y Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(2 + (4 * i))],label='Z Axis')\n plt.ylim(yAxisLimits[dataSelection][0],yAxisLimits[dataSelection][1])\n plt.xlabel('Time/s')\n plt.ylabel('Acceleration/g')\n plt.legend()\n plt.show()",
"def plot_channels(self, data_array):\n\n plt.figure()\n for p in range(1, 7):\n plt.subplot(6, 1, p)\n plt.plot(data_array[p-1, :])\n\n plt.draw()\n plt.show()\n return",
"def initialiseData(self):\n self.currentPosition = 0\n self.xs = scipy.linspace(0.0, self.numberOfPoints*self.resolution, self.numberOfPoints)\n self.cursorXS = self.getCurrentPositionArray()\n self.cursorVertical = scipy.array([self.verticalLimit,0.0])\n self.array0 = scipy.zeros(self.numberOfPoints)\n self.array1 = scipy.zeros(self.numberOfPoints)\n self.array2 = scipy.zeros(self.numberOfPoints)\n self.array3 = scipy.zeros(self.numberOfPoints)\n self.array4 = scipy.zeros(self.numberOfPoints)\n self.array5 = scipy.zeros(self.numberOfPoints)\n self.array6 = scipy.zeros(self.numberOfPoints)\n self.array7 = scipy.zeros(self.numberOfPoints)\n self.channels = [self.array0,self.array1,self.array2,self.array3,\n self.array4,self.array5,self.array6,self.array7]\n self.arrayPlotData = chaco.ArrayPlotData(xs=self.xs,channel0=self.array0,channel1=self.array1,\n channel2=self.array2,channel3=self.array3,\n channel4=self.array4,channel5=self.array5,\n channel6=self.array6,channel7=self.array7,\n cursorXS = self.cursorXS, cursorVertical=self.cursorVertical)#will be the ArrayPlotData We need",
"def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()",
"def updateArrayPlotData(self):\n self.arrayPlotData.set_data(\"channel0\",self.array0)\n self.arrayPlotData.set_data(\"channel1\",self.array1)\n self.arrayPlotData.set_data(\"channel2\",self.array2)\n self.arrayPlotData.set_data(\"channel3\",self.array3)\n self.arrayPlotData.set_data(\"channel4\",self.array4)\n self.arrayPlotData.set_data(\"channel5\",self.array5)\n self.arrayPlotData.set_data(\"channel6\",self.array6)\n self.arrayPlotData.set_data(\"channel7\",self.array7)\n self.arrayPlotData.set_data(\"cursorXS\",self.cursorXS)\n #self.arrayPlotData.set_data(\"cursorVertical\",self.cursorVertical)",
"def main():\n data = load_data()\n # BNO055 absolute orientation sensor\n bno_time = data[2].index / 1e6\n bno_accel_axes = [data[2][bno_str] / 9.8 for bno_str in BNO_ACCEL]\n plot_multi_axis(bno_time, bno_accel_axes,\n labels=['BNO055 Acceleration',\n 'Time (s)', 'Acceleration (G)',\n [*BNO_ACCEL, 'magnitude']],\n fname='bno_accel.html')\n bno_gyro_axes = [data[2][bno_str] for bno_str in BNO_GYRO]\n plot_multi_axis(bno_time, bno_gyro_axes,\n labels=['BNO055 Roll Rate',\n 'Time (s)', 'Roll Rate (deg/s)',\n [*BNO_GYRO, 'magnitude']],\n fname='bno_gyro.html')\n # MMA65XX high-range accelerometer\n mma_time = data[1].index / 1e6\n mma_axes = [data[1][mma_str] / 9.8 for mma_str in MMA]\n plot_multi_axis(mma_time, mma_axes,\n labels=['MMA65XX High-Range Acceleration',\n 'Time (s)', 'Acceleration (G)',\n [*MMA, 'magnitude']],\n fname='mma.html')\n # skybass_sampling_rates(data)",
"def plot_data(data, fig):\n\n if data.shape[1] > 3:\n print(\"Warning: data dimension is larger than 3, dim is %s\" % (data.shape[1]))\n\n ax = fig.add_subplot(111, projection='3d')\n # ax.scatter(data[:, 0], data[:, 1], data[:, 2], marker='.', s=0.5)\n return ax",
"def plot_data(self, show=False):\n\n fig, ax = plt.subplots(2, 1, figsize = (8, 6))\n plt.subplots_adjust(hspace=0.5)\n\n nrows = 10\n\n for _ in range(nrows):\n\n # plot nrows random examples from the simulated train data \n if self.flatten:\n print ('Plotting data... reshaping the flattened data to %s'%str(input_shape))\n temp = self.data['data'][np.random.randint(self.n_train * self.n_s)].reshape(input_shape)\n x, y = temp.T[:,0]\n else:\n print ('Plotting data...')\n temp = self.data['data'][np.random.randint(self.n_train * self.n_s)].reshape(ncombinations,len(ells))\n Cl = temp[0] # plot the (0,0) autocorrelation bin\n\n if self.rescaled:\n ax[0].plot(ells, Cl)\n else:\n ax[0].loglog(ells, ells*(ells+1)*Cl)\n ax[0].set_title(f'{nrows} examples from training data, Cl (0,0)')\n ax[0].set_xlabel(r'$\\ell$')\n ax[0].set_xscale('log')\n if self.rescaled:\n ax[0].set_ylabel(r'$C_\\ell$')\n else:\n ax[0].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n \n\n # plot nrows random examples from the simulated test data \n if self.flatten:\n temp = self.data['validation_data'][np.random.randint(self.n_s)].reshape(input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['validation_data'][np.random.randint(self.n_train * self.n_s)].reshape(ncombinations,len(ells))\n Cl = temp[0] # plot the (0,0) autocorrelation bin\n\n if self.rescaled:\n ax[1].plot(ells, Cl)\n else:\n ax[1].loglog(ells, ells*(ells+1)*Cl)\n ax[1].set_title(f'{nrows} examples from test data, Cl (0,0)')\n ax[1].set_xlabel(r'$\\ell$')\n ax[1].set_xscale('log')\n if self.rescaled:\n ax[1].set_ylabel(r'$C_\\ell$')\n else:\n ax[1].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n # plt.legend()\n\n plt.savefig(f'{self.figuredir}data_visualization_{self.modelversion}.png')\n if show: plt.show()\n plt.close()",
"def plot_3d(x_data, y_data, Z, df, xlabel, ylabel, xrange=None,\n yrange=None, figsize=(12, 12)):\n fig = pyplot.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d')\n nsamp, nsen = Z.shape\n\n sen_index = df.columns.names.index('sensor')\n senlist = df.columns.levels[sen_index]\n pyplot.yticks(y_data, senlist)\n ax.plot_surface(\n np.repeat(x_data,\n nsen, axis=1),\n np.repeat(np.matrix(y_data), nsamp, axis=0),\n df.values,\n cmap=cm.coolwarm)\n pyplot.xlabel(xlabel)\n pyplot.ylabel('Sensor name')\n ax.set_zlabel(ylabel)\n ax.view_init(elev=45., azim=-130)\n ax.tick_params(axis='y', which='major', labelsize=4)\n pyplot.show()",
"async def plot_device_data(self, axes, name) -> []:\n pass",
"def plot_three(estacion,formato):\n global num_ticks\n\n if formato == 'vladi':\n ruta='/home/oscar/Doctorado/GPS/programas/python/datos_vladi/completos/'\n ns_file = ruta + 'mb_' + estacion.upper() + '_GP0.dat1'\n ew_file = ruta + 'mb_' + estacion.upper() + '_GP0.dat2'\n up_file = ruta + 'mb_' + estacion.upper() + '_GP0.dat3'\n ns_archivo=open(ns_file,'r')\n ew_archivo=open(ew_file,'r')\n up_archivo=open(up_file,'r')\n ns_datos=ns_archivo.readlines()[3:]\n ew_datos=ew_archivo.readlines()[3:]\n up_datos=up_archivo.readlines()[3:]\n ns_date=np.zeros((len(ns_datos),1))\n ns_data=np.zeros((len(ns_datos),1))\n ns_dat=np.zeros((len(ns_datos),1))\n ns_error=np.zeros((len(ns_datos),1))\n for i,lineas in enumerate(ns_datos):\n ns_date[i],ns_data[i],ns_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ns_x,ns_y = rem_mean(ns_date,ns_data,ns_error)\n ns_ticks,ns_labels = t_ticks(ns_x[0],ns_x[-1],num_ticks)\n ns_y = ns_y *1e5\n ew_date=np.zeros((len(ew_datos),1))\n ew_data=np.zeros((len(ew_datos),1))\n ew_dat=np.zeros((len(ew_datos),1))\n ew_error=np.zeros((len(ew_datos),1))\n for i,lineas in enumerate(ew_datos):\n ew_date[i],ew_data[i],ew_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ew_x,ew_y = rem_mean(ew_date,ew_data,ew_error)\n ew_ticks,ew_labels = t_ticks(ew_x[0],ew_x[-1],num_ticks)\n ew_y = ew_y *1e5\n up_date=np.zeros((len(up_datos),1))\n up_data=np.zeros((len(up_datos),1))\n up_dat=np.zeros((len(up_datos),1))\n up_error=np.zeros((len(up_datos),1))\n for i,lineas in enumerate(up_datos):\n up_date[i],up_data[i],up_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n up_x,up_y = rem_mean(up_date,up_data,up_error)\n up_ticks,up_labels = t_ticks(up_x[0],up_x[-1],num_ticks)\n up_y = up_y *1e5\n elif formato == 'sara':\n ruta = '/home/oscar/Doctorado/GPS/programas/python/datos_sara/'\n ns_file = ruta + estacion.upper() + '/lat.' + estacion.lower() + '.dat'\n ew_file = ruta + estacion.upper() + '/long.' + estacion.lower() + '.dat'\n up_file = ruta + estacion.upper() + '/height.' 
+ estacion.lower() + '.dat'\n ns_archivo=open(ns_file,'r')\n ew_archivo=open(ew_file,'r')\n up_archivo=open(up_file,'r')\n ns_datos=ns_archivo.readlines()\n ew_datos=ew_archivo.readlines()\n up_datos=up_archivo.readlines()\n ns_date=np.zeros((len(ns_datos),1))\n ns_data=np.zeros((len(ns_datos),1))\n ns_dat=np.zeros((len(ns_datos),1))\n ns_error=np.zeros((len(ns_datos),1))\n for i,lineas in enumerate(ns_datos):\n ns_date[i],ns_data[i],ns_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ns_x,ns_y = rem_mean(ns_date,ns_data,ns_error)\n ns_ticks,ns_labels = t_ticks(ns_x[0],ns_x[-1],num_ticks)\n ns_y = ns_y *1e5\n ew_date=np.zeros((len(ew_datos),1))\n ew_data=np.zeros((len(ew_datos),1))\n ew_dat=np.zeros((len(ew_datos),1))\n ew_error=np.zeros((len(ew_datos),1))\n for i,lineas in enumerate(ew_datos):\n ew_date[i],ew_data[i],ew_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ew_x,ew_y = rem_mean(ew_date,ew_data,ew_error)\n ew_ticks,ew_labels = t_ticks(ew_x[0],ew_x[-1],num_ticks)\n ew_y = ew_y *1e5\n up_date=np.zeros((len(up_datos),1))\n up_data=np.zeros((len(up_datos),1))\n up_dat=np.zeros((len(up_datos),1))\n up_error=np.zeros((len(up_datos),1))\n for i,lineas in enumerate(up_datos):\n up_date[i],up_data[i],up_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n up_x,up_y = rem_mean(up_date,up_data,up_error)\n up_ticks,up_labels = t_ticks(up_x[0],up_x[-1],num_ticks)\n up_y = up_y *1e5\n elif formato == 'cabral':\n ruta = '/home/oscar/Doctorado/GPS/programas/python/datos_enrique_cabral/'\n ns_file = ruta + 'north_' + estacion.upper()\n ew_file = ruta + 'east_' + estacion.upper()\n up_file = ruta + 'vert_' + estacion.upper()\n ns_archivo=open(ns_file,'r')\n ew_archivo=open(ew_file,'r')\n up_archivo=open(up_file,'r')\n ns_datos=ns_archivo.readlines()[1:]\n ew_datos=ew_archivo.readlines()[1:]\n up_datos=up_archivo.readlines()[1:]\n ns_date=np.zeros((len(ns_datos),1))\n ns_data=np.zeros((len(ns_datos),1))\n ns_dat=np.zeros((len(ns_datos),1))\n ns_error=np.zeros((len(ns_datos),1))\n for i,lineas in enumerate(ns_datos):\n ns_date[i],ns_data[i],ns_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[8]\n ns_x = ns_date\n ns_y = ns_data\n ns_ticks,ns_labels = t_ticks(ns_x[0],ns_x[-1],num_ticks)\n ew_date=np.zeros((len(ew_datos),1))\n ew_data=np.zeros((len(ew_datos),1))\n ew_dat=np.zeros((len(ew_datos),1))\n ew_error=np.zeros((len(ew_datos),1))\n for i,lineas in enumerate(ew_datos):\n ew_date[i],ew_data[i],ew_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[8]\n ew_x = ew_date\n ew_y = ew_data\n ew_ticks,ew_labels = t_ticks(ew_x[0],ew_x[-1],num_ticks)\n up_date=np.zeros((len(up_datos),1))\n up_data=np.zeros((len(up_datos),1))\n up_dat=np.zeros((len(up_datos),1))\n up_error=np.zeros((len(up_datos),1))\n for i,lineas in enumerate(up_datos):\n up_date[i],up_data[i],up_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[6]\n up_x = up_date\n up_y = up_data\n up_ticks,up_labels = t_ticks(up_x[0],up_x[-1],num_ticks)\n else:\n exit('[ERROR] Unrecognized format')\n\n ind = np.where(ns_x >= 2000)\n ns_x = ns_x[ind[0]]\n ns_y = ns_y[ind[0]]\n ind = np.where(ew_x >= 2000)\n ew_x = ew_x[ind[0]]\n ew_y = ew_y[ind[0]]\n ind = np.where(up_x >= 2000)\n up_x = up_x[ind[0]]\n up_y = up_y[ind[0]]\n\n plt.figure(num=None, figsize=(7, 13))\n plt.subplots_adjust(wspace=.05)\n plt.subplot(3,1,1)\n plt.grid()\n plt.plot(ns_x,ns_y,'ro',mec='green',mfc='red',mew=.5,ms=3.0,alpha=0.5)\n plt.ylabel('Milimeters')\n plt.xticks(ns_ticks,ns_labels,rotation=30)\n 
plt.xlim(ns_x[0], ns_x[-1])\n plt.title('%s - %s' % (estacion.upper(),'NS'))\n plt.subplot(3,1,2)\n plt.grid()\n plt.plot(ew_x,ew_y,'ro',mec='blue',mfc='red',mew=.5,ms=3.0,alpha=0.5)\n plt.ylabel('Milimeters')\n plt.xticks(ew_ticks,ew_labels,rotation=30)\n plt.xlim(ns_x[0], ns_x[-1])\n plt.title('%s - %s' % (estacion.upper(),'EW'))\n plt.subplot(3,1,3)\n plt.grid()\n plt.plot(up_x,up_y,'ro',mec='blue',mfc='green',mew=.5,ms=3.0,alpha=0.5)\n plt.xlabel('Years since %4.1f'% (up_date[0]))\n plt.ylabel('Milimeters')\n plt.xticks(up_ticks,up_labels,rotation=30)\n plt.xlim(ns_x[0], ns_x[-1])\n plt.title('%s - %s' % (estacion.upper(),'UP'))\n plt.subplots_adjust(bottom=0.1, top=0.95, hspace=.43)\n# plt.savefig(estacion.upper()+'_'+formato+'.jpg',dpi=300)\n plt.show()",
"def __init__(self, fpga, plots, chann=6069, freqs=[0, 67.5], bw=67.5):\n\n self.fpga = fpga\n self.fpga.write_int('cnt_rst',0) #just in case\n self.plots = plots\n self.nplots = len(self.plots)\n self.chann = chann\n self.freq = freqs\n self.bw = bw\n self.fft_freq = np.linspace(0, bw, 2**13,endpoint=False)\n self.plot_map = {1:'11', 2:'12', 3:'22', 4:'22', 5:'23',\n 6:'23', 7: '33', 8:'33', 9:'33'}\n self.fig = plt.figure()\n self.axes = []\n self.data = [] \n \n #generate a dict for the specification of each plot\n #the info is encoded in [title, y_label, x_label,(y_init, y_end), (x_init, x_end), [brams], data_type]\n self.plot_info = {'spect0':['Spectrum ZDOK0', '[dB]', '[MHz]',\n (30, 180), (self.freq), ['1_A2'], '>8192Q'],\n 'spect1':['Spectrum ZDOK1', '[dB]', '[MHz]',\n (30, 180), (self.freq), ['1_B2'], '>8192Q'],\n 're_full':['Real correlation', '', '[MHz]',\n (30,180), (self.freq), ['AB_re'], '>8192q'],\n 'im_full':['Imag correlation', '', '[MHz]',\n (30,180), (self.freq), ['AB_im'], '>8192q'],\n 'phase':['Relative Phase', ('['+u'\\xb0'+']'), '[MHz]',\n (-180,180), (self.freq), ['AB_im', 'AB_re'], '>8192q'],\n 'chann_pow':['Relative Power at'+str(self.fft_freq[self.chann]),\n '[dB]','[MHz]',(-180,180), (0, 8191),\n ['PowA', 'PowB'], '>8192Q'],\n 'chann_phase':['Relative phase at'+str(self.fft_freq[self.chann]),\n ('['+u'\\xb0'+']'), '[MHz]',(-180,180), (0,8191),\n ['phase'], '>16384q']}\n\n\tself.fpga.write_int('mux_sel',0)\n\tself.fpga.write_int('n_points', 16384)\n\tself.fpga.write_int('reading_data',1)\n\tself.fpga.write_int('reading_data',0)\n\tself.create_plots()\n\tanim = animation.FuncAnimation(self.fig, self.animate, blit=True)\n\tplt.show()",
"def plot_data(x):\n if DATA_2D:\n plt.scatter(x[:, 0], x[:, 1])\n plt.show()\n else:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x[:, 0], x[:, 1], x[:, 2])\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()",
"def plot(self):\n\n # initialize outside the loop to avoid memory leak\n\n plot_a = None\n\n # initial plotting scales\n vmin = 0\n vmax = 0\n pmin = 0\n pmax = 0\n\n sr = self.dio.get_properties(self.channel)['samples_per_second']\n\n if self.control.verbose:\n print 'sample rate: ', sr\n\n # initial time info\n display_lag = 60\n b = self.dio.get_bounds(self.channel)\n\n if self.control.verbose:\n print 'data bounds: ', b\n\n if self.control.start:\n dtst0 = dateutil.parser.parse(self.control.start)\n st0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n st0 = int(st0 * sr)\n else:\n st0 = int(b[0])\n\n if self.control.end:\n dtst0 = dateutil.parser.parse(self.control.end)\n et0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n et0 = int(et0 * sr)\n else:\n et0 = int(b[1])\n\n if self.control.verbose:\n\n print 'start sample st0: ', st0\n print 'end sample et0: ', et0\n\n blocks = self.control.bins * self.control.frames\n\n samples_per_stripe = self.control.num_fft * \\\n self.control.integration * self.control.decimation\n total_samples = blocks * samples_per_stripe\n\n if total_samples > (et0 - st0):\n print 'Insufficient samples for %d samples per stripe and %d blocks between %ld and %ld' % (samples_per_stripe, blocks, st0, et0)\n return\n\n stripe_stride = (et0 - st0) / blocks\n\n bin_stride = stripe_stride / self.control.bins\n\n start_sample = st0\n\n print 'first ', start_sample\n\n # get metadata\n # this could be done better to ensure we catch frequency or sample rate\n # changes\n mdt = self.dio.read_metadata(st0, et0, self.channel)\n try:\n md = mdt[mdt.keys()[0]]\n cfreq = md['center_frequencies'].ravel()[self.sub_channel]\n except (IndexError, KeyError):\n cfreq = 0.0\n\n if self.control.verbose:\n print 'processing info : ', self.control.frames, self.control.bins, samples_per_stripe, bin_stride\n\n for p in numpy.arange(self.control.frames):\n sti_psd_data = numpy.zeros(\n [self.control.num_fft, self.control.bins], numpy.float)\n sti_times = numpy.zeros([self.control.bins], numpy.complex128)\n\n for b in numpy.arange(self.control.bins):\n\n if self.control.verbose:\n print 'read vector :', self.channel, start_sample, samples_per_stripe\n\n d_vec = self.dio.read_vector(\n start_sample, samples_per_stripe, self.channel)\n data = d_vec[:, self.sub_channel]\n\n if self.control.decimation > 1:\n data = scipy.signal.decimate(data, self.control.decimation)\n sample_freq = sr / self.control.decimation\n else:\n sample_freq = sr\n\n if self.control.mean:\n detrend_fn = matplotlib.mlab.detrend_mean\n else:\n detrend_fn = matplotlib.mlab.detrend_none\n\n try:\n psd_data, freq_axis = matplotlib.mlab.psd(\n data, NFFT=self.control.num_fft, Fs=float(sample_freq), detrend=detrend_fn, scale_by_freq=False)\n except:\n traceback.print_exc(file=sys.stdout)\n\n sti_psd_data[:, b] = numpy.real(\n 10.0 * numpy.log10(numpy.abs(psd_data) + 1E-12))\n\n sti_times[b] = start_sample / sr\n\n start_sample += stripe_stride\n\n # Now Plot the Data\n ax = self.subplots[p]\n\n # determine image x-y extent\n extent = (\n 0,\n self.control.bins,\n numpy.min(freq_axis) / 1e3,\n numpy.max(freq_axis) / 1e3,\n )\n\n # determine image color extent in log scale units\n Pss = sti_psd_data\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n if self.control.zaxis:\n vmin = int(string.split(self.control.zaxis, ':')[0])\n vmax = 
int(string.split(self.control.zaxis, ':')[1])\n else:\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n im = ax.imshow(sti_psd_data, cmap='jet', origin='lower', extent=extent,\n interpolation='nearest', vmin=vmin, vmax=vmax, aspect='auto')\n\n ax.set_ylabel('f (kHz)', fontsize=8)\n\n # plot dates\n\n tick_spacing = numpy.arange(\n self.control.bins / 8, self.control.bins, self.control.bins / 8)\n ax.set_xticks(tick_spacing)\n tick_labels = []\n\n for s in tick_spacing:\n tick_time = sti_times[s]\n\n if tick_time == 0:\n tick_string = ''\n else:\n gm_tick_time = time.gmtime(numpy.real(tick_time))\n tick_string = '%02d:%02d:%02d' % (\n gm_tick_time[3], gm_tick_time[4], gm_tick_time[5])\n tick_labels.append(tick_string)\n\n ax.set_xticklabels(tick_labels)\n\n # set the font sizes\n tl = ax.get_xticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n tl = ax.get_yticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n print 'last ', start_sample\n\n # create a time stamp\n start_time = st0 / sr\n srt_time = time.gmtime(start_time)\n sub_second = int(round((start_time - int(start_time)) * 100))\n\n timestamp = \"%d-%02d-%02d %02d:%02d:%02d.%02d UT\" % (srt_time[0], srt_time[\n 1], srt_time[2], srt_time[3], srt_time[4], srt_time[5], sub_second)\n\n self.f.suptitle('%s %s %4.2f MHz (%s)' % (\n self.control.title, timestamp, cfreq / 1E6, self.control.path), fontsize=10)\n\n # ax.legend(fontsize=8)\n ax.set_xlabel('time (UTC)', fontsize=8)\n\n # fixup ticks\n\n tl = ax.get_xticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n tl = ax.get_yticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n\n self.gridspec.update()\n\n self.f.tight_layout()\n\n self.f.subplots_adjust(top=0.95, right=0.88)\n cax = self.f.add_axes([0.9, 0.12, 0.02, 0.80])\n self.f.colorbar(im, cax=cax)\n if self.control.outname:\n fname, ext = os.path.splitext(self.control.outname)\n if ext == '':\n ext = '.png'\n print \"Save plot as {}\".format(fname+ext)\n matplotlib.pyplot.savefig(fname+ext)\n if self.control.appear or not self.control.outname:\n print \"Show plot\"\n matplotlib.pyplot.show()",
"def plot_sensors_3d_inter(ax: str, epo1: mne.Epochs, epo2: mne.Epochs, lab: bool = False):\n\n # extract sensor infos and transform loc to fit with headmodel \n loc1 = copy(np.array([ch['loc'][:3] for ch in epo1.info['chs']]))\n loc1 = transform(loc1, traX=-0.17, traY=0, traZ=0.08, rotY=(-np.pi/12), rotZ=(-np.pi/2))\n lab1 = [ch for ch in epo1.ch_names]\n\n loc2 = copy(np.array([ch['loc'][:3] for ch in epo2.info['chs']]))\n loc2 = transform(loc2, traX=0.17, traY=0, traZ=0.08, rotY=(np.pi/12), rotZ=np.pi/2)\n lab2 = [ch for ch in epo2.ch_names]\n\n bads_epo1 =[]\n bads_epo1 = epo1.info['bads']\n bads_epo2 =[]\n bads_epo2 = epo2.info['bads']\n\n # plot sensors ('x' for bads)\n for ch in epo1.ch_names:\n if ch in bads_epo1:\n index_ch = epo1.ch_names.index(ch)\n x1, y1, z1 = loc1[index_ch, :]\n ax.scatter(x1, y1, z1, marker='x', color='dimgrey')\n if lab:\n ax.text(x1+0.012, y1+0.012 ,z1, lab1[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n else:\n index_ch = epo1.ch_names.index(ch)\n x1, y1, z1 = loc1[index_ch, :]\n ax.scatter(x1, y1, z1, marker='o', color='dimgrey')\n if lab:\n ax.text(x1+0.012, y1+0.012 ,z1, lab1[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n\n for ch in epo2.ch_names:\n if ch in bads_epo2:\n index_ch = epo2.ch_names.index(ch)\n x2, y2, z2 = loc2[index_ch, :]\n ax.scatter(x2, y2, z2, marker='x', color='dimgrey')\n if lab:\n ax.text(x2+0.012, y2+0.012 ,z2, lab2[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n else:\n index_ch = epo2.ch_names.index(ch)\n x2, y2, z2 = loc2[index_ch, :]\n ax.scatter(x2, y2, z2, marker='o', color='dimgrey')\n if lab:\n ax.text(x2+0.012, y2+0.012 ,z2, lab2[index_ch],\n horizontalalignment='center',\n verticalalignment='center')",
"def visualize_signal(self):\n plt.figure()\n plt.title('Accelerometer Signal')\n plt.plot(range(len(self.data)), self.data[1])",
"def plot_sensors_3d_intra(ax: str, epo1: mne.Epochs, epo2: mne.Epochs, lab: bool = False):\n\n # extract sensor infos and transform loc to fit with headmodel \n loc1 = copy(np.array([ch['loc'][:3] for ch in epo1.info['chs']]))\n loc1 = transform(loc1, traX=0, traY=0, traZ=0.04, rotY=0, rotZ=(-np.pi/2))\n lab1 = [ch for ch in epo1.ch_names]\n\n loc2 = copy(np.array([ch['loc'][:3] for ch in epo2.info['chs']]))\n loc2 = transform(loc2, traX=0, traY=0.5, traZ=0.04, rotY=0, rotZ=(-np.pi/2))\n lab2 = [ch for ch in epo2.ch_names]\n\n bads_epo1 =[]\n bads_epo1 = epo1.info['bads']\n bads_epo2 =[]\n bads_epo2 = epo2.info['bads']\n\n # plot sensors\n for ch in epo1.ch_names:\n if ch in bads_epo1:\n index_ch = epo1.ch_names.index(ch)\n x1, y1, z1 = loc1[index_ch, :]\n ax.scatter(x1, y1, z1, marker='x', color='dimgrey')\n if lab:\n ax.text(x1+0.012, y1+0.012 ,z1, lab1[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n else:\n index_ch = epo1.ch_names.index(ch)\n x1, y1, z1 = loc1[index_ch, :]\n ax.scatter(x1, y1, z1, marker='o', color='dimgrey')\n if lab:\n ax.text(x1+0.012, y1+0.012 ,z1, lab1[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n\n for ch in epo2.ch_names:\n if ch in bads_epo2:\n index_ch = epo2.ch_names.index(ch)\n x2, y2, z2 = loc2[index_ch, :]\n ax.scatter(x2, y2, z2, marker='x', color='dimgrey')\n if lab:\n ax.text(x2+0.012, y2+0.012 ,z2, lab2[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n else:\n index_ch = epo2.ch_names.index(ch)\n x2, y2, z2 = loc2[index_ch, :]\n ax.scatter(x2, y2, z2, marker='o', color='dimgrey')\n if lab:\n ax.text(x2+0.012, y2+0.012 ,z2, lab2[index_ch],\n horizontalalignment='center',\n verticalalignment='center')",
"def plot(axes, axis, values, c='chartreuse'):\n a = axes[axis]\n a.set_xlabel('time (s)')\n x = np.array(range(len(values))) / 1000\n dim = 'x' if axis == 0 else 'y' if axis == 1 else 'z'\n a.set_title('-'.join([dim, 'acceleration']))\n a.plot(x, values / 1000, c=c)",
"def plot_data(self):",
"def plot_autocorrs(self, axis=0, n_rows=4, n_cols=8):\n self.current_plot = 'multi'\n self.ax_zoomed = False\n \n bls = self.uv.d_uv_data['BASELINE']\n\n # Extract the relevant baselines using a truth array\n # bls = bls.tolist()\n bl_ids = set([256*i + i for i in range(1, n_rows * n_cols + 1)])\n bl_truths = np.array([(b in bl_ids) for b in bls])\n \n #print self.uv.d_uv_data['DATA'].shape\n #x_data = self.d_uv_data['DATA'][bl_truths,0,0,:,0,axis] # Baselines, freq and stokes\n #x_cplx = x_data[:,:,0] + 1j * x_data[:,:,1]\n\n x_cplx = self.stokes[axis][bl_truths]\n\n\n \n # Plot the figure\n #print self.uv.n_ant\n fig = self.sp_fig\n figtitle = '%s %s: %s -- %s'%(self.uv.telescope, self.uv.instrument, self.uv.source, self.uv.date_obs)\n for i in range(n_rows):\n for j in range(n_cols):\n ax = fig.add_subplot(n_rows, n_cols, i*n_cols + j +1)\n ax.set_title(self.uv.d_array_geometry['ANNAME'][i*n_cols + j], fontsize=10)\n #ax.set_title(\"%s %s\"%(i, j))\n \n x = x_cplx[i*n_cols+j::self.uv.n_ant]\n \n if self.scale_select.currentIndex() == 0 or self.scale_select.currentIndex() == 1:\n if x.shape[0] == self.uv.n_ant:\n self.plot_spectrum(ax, x, label_axes=False)\n else:\n self.plot_spectrum(ax, x, stat='max', label_axes=False)\n self.plot_spectrum(ax, x, stat='med', label_axes=False)\n self.plot_spectrum(ax, x, stat='min', label_axes=False)\n else:\n self.plot_spectrum(ax, x, label_axes=False)\n self.updateFreqAxis(ax)\n \n if i == n_rows-1:\n ax.set_xlabel('Freq')\n if j == 0:\n ax.set_ylabel('Amplitude')\n \n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.tick_params(axis='both', which='major', labelsize=10)\n plt.tick_params(axis='both', which='minor', labelsize=8)\n plt.xticks(rotation=30)\n \n plt.subplots_adjust(left=0.05, right=0.98, top=0.95, bottom=0.1, wspace=0.3, hspace=0.45)\n return fig, ax",
"def plot_arias(stream, axes=None, axis_index=None,\n figsize=None, file=None, minfontsize=14, show=False,\n show_maximum=True, title=None, xlabel=None, ylabel=None):\n if len(stream) < 1:\n raise Exception('No traces contained within the provided stream.')\n\n stream = get_acceleration(stream, units='m/s/s')\n Ia = calculate_arias(stream, ['channels'], True)[0]\n\n starttime = stream[0].stats.starttime\n if title is None:\n title = 'Event on ' + str(starttime.month) + '/' + str(starttime.day) + '/' + \\\n str(starttime.year)\n if xlabel is None:\n xlabel = 'Time (s)'\n if ylabel is None:\n ylabel = 'Ia (m/s)'\n\n if figsize is None:\n figsize = (6.5, 7.5)\n if axes is None:\n fig, axs = plt.subplots(len(Ia), 1, figsize=figsize)\n axis_numbers = np.linspace(0, len(Ia) - 1, len(Ia))\n elif axis_index is not None:\n axs = axes\n axis_numbers = np.linspace(axis_index, axis_index + len(Ia) - 1, len(Ia))\n for idx, trace in zip(axis_numbers.astype(int), Ia):\n ax = axs[idx]\n dt = trace.stats['delta']\n npts = len(trace.data)\n t = np.linspace(0, (npts-1)*dt, num=npts)\n network = trace.stats['network']\n station = trace.stats['station']\n channel = trace.stats['channel']\n trace_label = network + '.' + station + '.' + channel\n ax.set_title(trace_label, fontsize=minfontsize)\n ax.plot(t, trace.data)\n if show_maximum:\n abs_arr = np.abs(trace.data.copy())\n idx = np.argmax(abs_arr)\n max_value = abs_arr[idx]\n ax.plot([t[idx]], [trace.data[idx]], marker='o', color=\"red\")\n ax.annotate('%.2E' % max_value, (t[idx], trace.data[idx]),\n xycoords='data', xytext=(.85, 0.25),\n textcoords='axes fraction',\n arrowprops=dict(facecolor='black',\n shrink=0.05, width=1, headwidth=4),\n horizontalalignment='right', verticalalignment='top')\n ax.set_xlabel(xlabel, fontsize=minfontsize)\n ax.set_ylabel(ylabel, fontsize=minfontsize)\n ax.xaxis.set_tick_params(labelsize=minfontsize - 2)\n ax.yaxis.set_tick_params(labelsize=minfontsize - 2)\n plt.suptitle(title, y=1.01, fontsize=minfontsize + 4)\n plt.tight_layout()\n if show and axes is None:\n plt.show()\n if file is not None and axes is None:\n fig.savefig(file, format='png')\n return axs",
"def plot_examples(cms):\r\n data = amp_axis\r\n\r\n fig, axs = plt.subplots(1, 2, figsize=(30, 8)) #create two plots\r\n for [ax, cmap] in zip(axs, cms):\r\n psm = ax.pcolormesh(time_axis, tof_axis, data, cmap=cmap, rasterized=True, vmin = 250) #specify axis and minimum amplitude value to show on the graph\r\n fig.colorbar(psm, ax=ax, label = 'Amplitude') #define the legend of the amplitude data\r\n \r\n ax.set_ylabel('Time of Flight [\\u03bcs]') #set label for y axis\r\n ax.set_xlabel('Time [min]') #set label for x axis\r\n \r\n ax.hlines(8.744, 0, stop_time, colors = 'white') #create two white lines for the safe operating range for ToF\r\n ax.hlines(9.555, 0, stop_time, colors = 'white') \r\n \r\n plt.show()",
"def plotData(BX,BY,xi,yi,expArr,t,savepath_dir):\r\n \r\n #Find the current channel data\r\n Jz=newCurrent(BX,BY,xi,yi,expArr,t)\r\n\r\n #Find the dipole vector components\r\n BxTime=np.real(BX*expArr[t])\r\n ByTime=np.real(BY*expArr[t])\r\n\r\n #Plot the current density contour and dipole vector grid\r\n #Create the figure\r\n p1=plt.figure(figsize=(9,8))\r\n \r\n #Plot the data\r\n p1=plt.contourf(xi,yi,Jz,levels=100,vmin=-0.1,vmax=0.1)\r\n qv1=plt.quiver(xi,yi,BxTime,ByTime,width=0.004,scale=3)\r\n \r\n #Add axes labels and title\r\n p1=plt.xlabel('X [cm]',fontsize=20)\r\n p1=plt.ylabel('Y [cm]',fontsize=20)\r\n # p1=plt.title('Alfven Wave Dipole; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n p1=plt.title('E Field; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n \r\n #Set axes parameters\r\n p1=plt.xticks(np.arange(-50,51,5))\r\n p1=plt.yticks(np.arange(-50,51,5))\r\n p1=plt.xlim(-xAxisLim,xAxisLim)\r\n p1=plt.ylim(-yAxisLim,yAxisLim)\r\n \r\n #Add colorbar\r\n cbar=plt.colorbar()\r\n cbar.set_label('Normalized Current Density',rotation=270,labelpad=15)\r\n cbar=plt.clim(-1,1)\r\n \r\n #Add vector label\r\n plt.quiverkey(qv1,-0.1,-0.1,0.2,label=r'$(B_x,B_y)$')\r\n \r\n #Miscellaneous\r\n p1=plt.tick_params(axis='both', which='major', labelsize=18)\r\n p1=plt.grid(True)\r\n p1=plt.gcf().subplots_adjust(left=0.15)\r\n\r\n #Save the plot\r\n savepath_frame=savepath_dir+'frame'+str(t+1)+'.png'\r\n p1=plt.savefig(savepath_frame,dpi=100,bbox_to_anchor='tight')\r\n p1=plt.close()\r\n\r\n #Let me know which frame we just saved\r\n print('Saved frame '+str(t+1)+' of '+str(len(expArr)))\r\n \r\n return",
"def show_data(self):\n\n self.area_canvas.axes.cla()\n self.draw_scatterplot(self.scatter_canvas, 'x [µm]', 'y [µm]', self.p_inputs['flip y-axis'].isChecked())\n self.draw_hist(self.area_canvas, 'area', 'cluster area [µm²]', 'number of clusters')\n self.draw_hist(self.number_canvas, 'nclusters', 'number of cluster', 'number of regions')\n self.draw_hist(self.density_canvas, 'density', 'cluster density [µm⁻²]', 'number of clusters')\n self.draw_hist(self.percentage_canvas, 'pclustered', 'percentage clustered',\n 'number of regions')\n self.draw_hist(self.ratio_canvas, 'reldensity', 'relative density clusters/background',\n 'number of regions')",
"def make_wave_plots(Nsteps=NSTEPS, plotAmp=0):\n\n if plotAmp != 2:\n ax, fig = setup_plot()\n\n for k in range(Nsteps):\n if plotAmp == 2: # separate plot for each step\n ax, fig = setup_plot()\n\n for i in range(len(q_vec)):\n dataLen = len(t_vec)\n startIdx = k*dataLen/Nsteps\n endIdx = (k+1)*dataLen/Nsteps\n xVals = np.ones(dataLen)*q_vec[i]\n yVals = t_vec\n zVals = h_vec[i]\n zorder_waves = get_z_order_waves(q_vec, i)\n\n if plotAmp==0: # plot real parts in increments\n ax.plot3D(xVals[startIdx:endIdx], yVals[startIdx:endIdx], \\\n zs=np.real(zVals[startIdx:endIdx]), color=color_wave_list[i], \\\n lw=line_width, zorder=zorder_waves)\n elif plotAmp==1: # plot fixed real part, but amplitude in increments\n if k == 0:\n ax.plot3D(xVals, yVals, zs=np.real(zVals), color=color_wave_list[i], \\\n lw=line_width, zorder=zorder_waves)\n ax.plot3D(xVals[startIdx:endIdx], yVals[startIdx:endIdx], \\\n zs=np.abs(zVals[startIdx:endIdx]), color=color_amp, \\\n lw=line_width, zorder=zorder_waves)\n elif plotAmp==2: # plot amplitude, roll along t with height, plot empirical time nodes\n ax.plot3D(xVals, yVals, zs=np.abs(zVals), color=color_amp, lw=line_width) # plot amp\n tmp_dataLen = 1000\n tmp_x = q_vec[i]\n tmp_y = t_vec[startIdx]\n tmp_z = np.abs(h_vec[i][startIdx])\n tmp_xVals = np.ones(tmp_dataLen)*tmp_x\n tmp_yVals = np.ones(tmp_dataLen)*tmp_y\n tmp_zVals = np.linspace(0, tmp_z, tmp_dataLen)\n ax.plot3D(tmp_xVals, tmp_yVals, zs=tmp_zVals, color=color_ht, lw=line_width) # roll along t with height\n if tmp_y < 75: # plot the marker for the roll\n ax.scatter(tmp_x, tmp_y, tmp_z, c=color_marker, marker='s', s=marker_size_square)\n\n # plot empirical nodes after rolling past them\n for emp_time in empirical_node_times:\n if tmp_y >= emp_time: # If we rolled past this emp node, plot it always\n emp_idx = np.argmin(np.abs(t_vec - emp_time))\n this_x = q_vec[i]\n this_y = t_vec[emp_idx]\n this_z = np.abs(h_vec[i][emp_idx])\n # plot marker for empirical nodes\n ax.scatter(this_x, this_y, this_z, c=color_marker_nd, \\\n marker='o', s=marker_size)\n\n ax.set_xlim(0, 10)\n ax.set_ylim(min(t_vec), max(t_vec))\n ax.set_zlim(-ZLIM, ZLIM)\n ax.set_xlabel('$q$', fontsize=label_fontsize)\n ax.set_ylabel('$t$ $(M)$', fontsize=label_fontsize)\n\n\n # Make sure to save such that files end up in the same alphabetical order as you\n # want in the video. Here we just add a fake prefix.\n if plotAmp==0:\n save_plot('%s/aa_wave_%.4d.png'%(plotdir, k))\n elif plotAmp==1:\n save_plot('%s/ab_waveamp_%.4d.png'%(plotdir, k))\n elif plotAmp==2:\n ax.set_zlim(0, ZLIM)\n save_plot('%s/ba_amp_%.4d.png'%(plotdir, k))\n P.close()\n\n P.close()",
"def plot_data(array_list, params):\n\tkey = array_list[0]\n\tvals = array_list[1]\n\tprint key\n\tprint len(vals)\n\tfigure_id = 1\n\tfor item in params:\n\t\tx_axis = get_by_keys(item['x_keys'], key, vals)\n\t\ty_axis = get_by_keys(item['y_keys'], key, vals)\n\t\tplt.figure(figure_id)\n\t\tplt.plot(x_axis, y_axis)\n\t\tfigure_id = figure_id + 1\n\tplt.show()",
"def plot_data(self, filepath=None, time_min=None, time_max=None, title=None,\n electrode=None):\n\n # normalizes the samples x electrodes array containing the EEG data and\n # adds 1 to each row so that the y-axis value corresponds to electrode\n # location in the MNI coordinate (x,y,z) by electrode df containing\n # electrode locations\n\n if self.get_data().shape[0] == 1:\n nii = self.to_nii()\n nii.plot_glass_brain(pdfpath=filepath)\n elif self.get_data().empty:\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect='equal')\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()\n else:\n Y = _normalize_Y(self.data) # self.get_data()) this allows us to plot all the electrodes even the recon ones\n\n if electrode is not None:\n Y = Y.loc[:, electrode]\n if len(Y.shape) > 1:\n for i, column in enumerate(Y):\n Y[column] = Y[column] - int(column) + i\n\n # divide index by sample rate so that index corresponds to time\n if self.sample_rate:\n Y.index = np.divide(Y.index,np.mean(self.sample_rate))\n\n # if a time window is designated index data in that window\n if all([time_min, time_max]):\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y = Y[mask]\n\n # if a time window is not designated, default to the first 500 seconds\n else:\n time_min = 0\n time_max = 10\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y= Y[mask]\n \n if electrode:\n if len(Y.shape) > 1:\n ax = Y.plot(title=title, lw=.6)\n else:\n ax = Y.plot(title=title, lw=.6, color='k')\n else:\n ax = Y.plot(legend=False, title=title, color='k', lw=.6)\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()",
"def run(self):\n # fill the x_values,y_values,z_values dictionaries\n if not self.__fillCoordinatesFromSource():\n self.raiseAWarning('Nothing to Plot Yet. Returning.')\n return\n\n self.counter += 1\n if self.counter > 1:\n self.actcm = None\n clusterDict = deepcopy(self.outStreamTypes)\n\n # start plotting.... loop over the plots that need to be included in this figure\n for pltIndex in range(len(self.outStreamTypes)):\n plotSettings = self.options['plotSettings']['plot'][pltIndex]\n if 'gridLocation' in plotSettings:\n x = None\n y = None\n if 'x' in plotSettings['gridLocation']:\n x = list(map(int, plotSettings['gridLocation']['x'].strip().split(' ')))\n else:\n x = None\n if 'y' in plotSettings['gridLocation'].keys():\n y = list(map(int, plotSettings['gridLocation']['y'].strip().split(' ')))\n else:\n y = None\n if pltIndex == 0:\n self.ax.remove() # remove axis so that there is not an extra axis on plot with subplots\n if (len(x) == 1 and len(y) == 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]], projection='3d')\n elif (len(x) == 1 and len(y) != 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]:y[-1]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]:y[-1]], projection='3d')\n elif (len(x) != 1 and len(y) == 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]], projection='3d')\n else:\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]:y[-1]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]:y[-1]], projection='3d')\n\n if 'gridSpace' in self.options['plotSettings']:\n self.ax.locator_params(axis='y', nbins=4)\n self.ax.locator_params(axis='x', nbins=2)\n if 'range' in plotSettings:\n axes_range = plotSettings['range']\n if 'ymin' in axes_range:\n self.ax.set_ylim(bottom=ast.literal_eval(axes_range['ymin']))\n if 'ymax' in axes_range:\n self.ax.set_ylim(top=ast.literal_eval(axes_range['ymax']))\n if 'xmin' in axes_range:\n self.ax.set_xlim(left=ast.literal_eval(axes_range['xmin']))\n if 'xmax' in axes_range:\n self.ax.set_xlim(right=ast.literal_eval(axes_range['xmax']))\n if self.dim == 3:\n if 'zmin' in axes_range.options['plotSettings']['plot'][pltIndex]:\n if 'zmax' not in axes_range.options['plotSettings']:\n self.raiseAWarning('zmin inputted but not zmax. zmin ignored! ')\n else:\n self.ax.set_zlim(bottom=ast.literal_eval(axes_range['zmin']), top=ast.literal_eval(self.options['plotSettings']['zmax']))\n if 'zmax' in axes_range:\n if 'zmin' not in axes_range:\n self.raiseAWarning('zmax inputted but not zmin. zmax ignored! 
')\n else:\n self.ax.set_zlim(bottom=ast.literal_eval(axes_range['zmin']), top=ast.literal_eval(axes_range['zmax']))\n if 'xlabel' not in plotSettings:\n self.ax.set_xlabel('x')\n else:\n self.ax.set_xlabel(plotSettings['xlabel'])\n if 'ylabel' not in plotSettings:\n self.ax.set_ylabel('y')\n else:\n self.ax.set_ylabel(plotSettings['ylabel'])\n if 'zlabel' in plotSettings:\n if self.dim == 2:\n self.raiseAWarning('zlabel keyword does not make sense in 2-D Plots!')\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel(plotSettings['zlabel'])\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel('z')\n else:\n if 'xlabel' not in self.options['plotSettings']:\n self.ax.set_xlabel('x')\n else:\n self.ax.set_xlabel(self.options['plotSettings']['xlabel'])\n if 'ylabel' not in self.options['plotSettings']:\n self.ax.set_ylabel('y')\n else:\n self.ax.set_ylabel(self.options['plotSettings']['ylabel'])\n if 'zlabel' in self.options['plotSettings']:\n if self.dim == 2:\n self.raiseAWarning('zlabel keyword does not make sense in 2-D Plots!')\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel(self.options['plotSettings']['zlabel'])\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel('z')\n\n if 'legend' in self.options['plotSettings']:\n if 'label' not in plotSettings.get('attributes', {}):\n if 'attributes' not in plotSettings:\n plotSettings['attributes'] = {}\n plotSettings['attributes']['label'] = self.outStreamTypes[pltIndex] + ' ' + str(pltIndex)\n #################\n # SCATTER PLOT #\n #################\n self.raiseADebug(f'creating plot {self.name}')\n if self.outStreamTypes[pltIndex] == 'scatter':\n if 's' not in plotSettings:\n plotSettings['s'] = '20'\n if 'c' not in plotSettings:\n plotSettings['c'] = 'b'\n if 'marker' not in plotSettings:\n plotSettings['marker'] = 'o'\n if 'alpha' not in plotSettings:\n plotSettings['alpha'] = 'None'\n if 'linewidths' not in plotSettings:\n plotSettings['linewidths'] = 'None'\n if self.colorMapCoordinates[pltIndex] is not None:\n # Find the max and min colormap values\n firstKey = utils.first(self.xValues[pltIndex].keys())\n vmin = np.amin(self.colorMapValues[pltIndex][firstKey])\n vmax = np.amax(self.colorMapValues[pltIndex][firstKey])\n for key in self.xValues[pltIndex]:\n vmin = min(vmin,np.amin(self.colorMapValues[pltIndex][key]))\n vmax = max(vmax,np.amax(self.colorMapValues[pltIndex][key]))\n plotSettings['norm'] = matplotlib.colors.Normalize(vmin,vmax)\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n scatterPlotOptions = {'s': ast.literal_eval(plotSettings['s']),\n 'marker': (plotSettings['marker']),\n 'alpha': ast.literal_eval(plotSettings['alpha']),\n 'linewidths': ast.literal_eval(plotSettings['linewidths'])}\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['norm'] = plotSettings['norm']\n scatterPlotOptions.update(plotSettings.get('attributes', {}))\n if self.dim == 2:\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['c'] = self.colorMapValues[pltIndex][key][xIndex]\n scatterPlotOptions['cmap'] = matplotlib.cm.get_cmap(\"winter\")\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if 
first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n try:\n self.actcm.draw_all()\n # this is not good, what exception will be thrown?\n except:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n scatterPlotOptions['cmap'] = plotSettings['cmap']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m, ax=self.ax)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if 'color' not in scatterPlotOptions:\n scatterPlotOptions['c'] = plotSettings['c']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['c'] = self.colorMapValues[pltIndex][key][zIndex]\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n scatterPlotOptions['cmap'] = plotSettings['cmap']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if 'color' not in scatterPlotOptions:\n scatterPlotOptions['c'] = plotSettings['c']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], 
self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n #################\n # LINE PLOT #\n #################\n elif self.outStreamTypes[pltIndex] == 'line':\n minV = 0\n maxV = 0\n # If the user does not define an appropriate cmap, then use matplotlib's default.\n if 'cmap' not in plotSettings or plotSettings['cmap'] not in matplotlib.cm.datad:\n plotSettings['cmap'] = None\n if bool(self.colorMapValues):\n for key in self.xValues[pltIndex]:\n minV = min(minV,self.colorMapValues[pltIndex][key][-1][-1])\n maxV = max(maxV,self.colorMapValues[pltIndex][key][-1][-1])\n cmap = matplotlib.cm.ScalarMappable(matplotlib.colors.Normalize(minV, maxV, True), plotSettings['cmap'])\n cmap.set_array([minV,maxV])\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n if self.colorMapCoordinates[pltIndex] is not None:\n plotSettings['interpPointsX'] = str(max(200, len(self.xValues[pltIndex][key][xIndex])))\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.dim == 2:\n if self.yValues[pltIndex][key][yIndex].size < 2:\n return\n xi, yi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], plotSettings, returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n self.ax.plot(xi, yi, c=cmap.cmap(self.colorMapValues[pltIndex][key][-1][-1]/(maxV-minV)))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if self.actcm is None:\n self.actcm = self.fig.colorbar(cmap)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n self.actPlot = self.ax.plot(xi, yi, **plotSettings.get('attributes', {}))\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n self.ax.plot(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n self.zValues[pltIndex][key][zIndex],\n c=cmap.cmap(self.colorMapValues[pltIndex][key][-1][-1]/(maxV-minV)))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if self.actcm is None:\n self.actcm = self.fig.colorbar(cmap)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n self.actPlot = self.ax.plot(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n self.zValues[pltIndex][key][zIndex],\n **plotSettings.get('attributes', {}))\n ##################\n # HISTOGRAM PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'histogram':\n if 'bins' not in plotSettings:\n if self.dim == 2:\n plotSettings['bins'] = '10'\n else:\n plotSettings['bins'] = '4'\n if 'normed' not in plotSettings:\n plotSettings['normed'] = 'False'\n if 'weights' not in plotSettings:\n plotSettings['weights'] = 'None'\n if 'cumulative' not in plotSettings:\n plotSettings['cumulative'] = 'False'\n if 'histtype' not in plotSettings:\n plotSettings['histtype'] = 'bar'\n if 'align' not in plotSettings:\n plotSettings['align'] = 'mid'\n if 'orientation' not in plotSettings:\n plotSettings['orientation'] = 'vertical'\n if 'rwidth' not in plotSettings:\n plotSettings['rwidth'] = 'None'\n if 'log' not in plotSettings:\n plotSettings['log'] = 'None'\n if 'color' not in plotSettings:\n plotSettings['color'] = 'b'\n if 'stacked' not in plotSettings:\n plotSettings['stacked'] = 'None'\n if 
self.sourceData[0].type.strip() == 'HistorySet':\n #####################################################################################################################################\n # @MANDD: This 'if' condition has been added in order to allow the user the correctly create an histogram out of an historySet #\n # If the histogram is created out of the input variables, then the plot has an identical meaning of the one generated by a pointSet #\n # However, if the histogram is created out of the output variables, then the plot consider only the last value of the array #\n #####################################################################################################################################\n data = {}\n data['x'] = np.empty(0)\n data['y'] = np.empty(0)\n for index in range(len(self.outStreamTypes)):\n for key in self.xValues[index]:\n data['x'] = np.append(data['x'], self.xValues[index][key][0][-1])\n if self.dim == 3:\n data['y'] = np.append(data['y'], self.yValues[index][key][0][-1])\n del self.xValues[index]\n self.xValues = {}\n self.xValues[index] = {}\n self.xValues[index][0] = []\n self.xValues[index][0].append(deepcopy(data['x']))\n if self.dim == 3:\n del self.yValues[index]\n self.yValues = {}\n self.yValues[index] ={ }\n self.yValues[index][0] = []\n self.yValues[index][0].append(deepcopy(data['y']))\n\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n try:\n colorss = ast.literal_eval(plotSettings['color'])\n # unknown what specific error is anticipated here, but I don't like a bare except...\n # ast.literal_eval can raise the exceptions listed below (see library docs):\n except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):\n colorss = plotSettings['color']\n if self.dim == 2:\n self.ax.hist(self.xValues[pltIndex][key][xIndex],\n bins=ast.literal_eval(plotSettings['bins']),\n density=ast.literal_eval(plotSettings['normed']),\n weights=ast.literal_eval(plotSettings['weights']),\n cumulative=ast.literal_eval(plotSettings['cumulative']),\n histtype=plotSettings['histtype'],\n align=plotSettings['align'],\n orientation=plotSettings['orientation'],\n rwidth=ast.literal_eval(plotSettings['rwidth']),\n log=ast.literal_eval(plotSettings['log']),\n color=colorss,\n stacked=ast.literal_eval(plotSettings['stacked']),\n **plotSettings.get('attributes', {}))\n else:\n for yIndex in range(len(self.yValues[pltIndex][key])):\n hist, xedges, yedges = np.histogram2d(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n bins=ast.literal_eval(plotSettings['bins']))\n elements = (len(xedges) - 1) * (len(yedges) - 1)\n if 'x_offset' in plotSettings:\n xoffset = float(plotSettings['x_offset'])\n else:\n xoffset = 0.25\n if 'y_offset' in plotSettings:\n yoffset = float(plotSettings['y_offset'])\n else:\n yoffset = 0.25\n if 'dx' in plotSettings:\n dxs = float(plotSettings['dx'])\n else:\n dxs = (self.xValues[pltIndex][key][xIndex].max() - self.xValues[pltIndex][key][xIndex].min()) / float(plotSettings['bins'])\n if 'dy' in plotSettings:\n dys = float(plotSettings['dy'])\n else:\n dys = (self.yValues[pltIndex][key][yIndex].max() - self.yValues[pltIndex][key][yIndex].min()) / float(plotSettings['bins'])\n xpos, ypos = np.meshgrid(xedges[:-1] + xoffset, yedges[:-1] + yoffset)\n self.actPlot = self.ax.bar3d(xpos.flatten(),\n ypos.flatten(),\n np.zeros(elements),\n dxs*np.ones_like(elements),\n dys*np.ones_like(elements),\n hist.flatten(),\n color=colorss,\n zsort='average',\n 
**plotSettings.get('attributes', {}))\n ##################\n # STEM PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'stem':\n if 'linefmt' not in plotSettings:\n plotSettings['linefmt'] = 'b-'\n if 'markerfmt' not in plotSettings:\n plotSettings['markerfmt'] = 'bo'\n if 'basefmt' not in plotSettings:\n plotSettings['basefmt'] = 'r-'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.dim == 2:\n self.actPlot = self.ax.stem(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n linefmt=plotSettings['linefmt'],\n markerfmt=plotSettings['markerfmt'],\n basefmt = plotSettings['linefmt'],\n use_line_collection=True,\n **plotSettings.get('attributes', {}))\n else:\n # it is a basic stem plot constructed using a standard line plot. For now we do not use the previous defined keywords...\n for zIndex in range(len(self.zValues[pltIndex][key])):\n for xx, yy, zz in zip(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex]):\n self.ax.plot([xx, xx], [yy, yy], [0, zz], '-')\n ##################\n # STEP PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'step':\n if self.dim == 2:\n if 'where' not in plotSettings:\n plotSettings['where'] = 'mid'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n if self.xValues[pltIndex][key][xIndex].size < 2:\n xi = self.xValues[pltIndex][key][xIndex]\n else:\n xi = np.linspace(self.xValues[pltIndex][key][xIndex].min(), self.xValues[pltIndex][key][xIndex].max(), ast.literal_eval(plotSettings['interpPointsX']))\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.yValues[pltIndex][key][yIndex].size <= 3:\n return\n yi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], plotSettings)\n self.actPlot = self.ax.step(xi, yi, where=plotSettings['where'], **plotSettings.get('attributes', {}))\n else:\n self.raiseAWarning('step Plot not available in 3D')\n return\n ########################\n # PSEUDOCOLOR PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'pseudocolor':\n if self.dim == 2:\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if not self.colorMapCoordinates:\n self.raiseAMessage('pseudocolor Plot needs coordinates for color map... 
Returning without plotting')\n return\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.colorMapValues[pltIndex][key][zIndex].size <= 3:\n return\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.pcolormesh(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n **plotSettings.get('attributes', {}))\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n else:\n self.actPlot = self.ax.pcolormesh(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(ma.masked_where(np.isnan(Ci), Ci))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n actcm = self.fig.colorbar(m)\n actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.raiseAWarning('pseudocolor Plot is considered a 2D plot, not a 3D!')\n return\n ########################\n # SURFACE PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'surface':\n if self.dim == 2:\n self.raiseAWarning('surface Plot is NOT available for 2D plots, IT IS A 3D!')\n return\n else:\n if 'rstride' not in plotSettings:\n plotSettings['rstride'] = '1'\n if 'cstride' not in plotSettings:\n plotSettings['cstride'] = '1'\n if 'antialiased' not in plotSettings:\n plotSettings['antialiased'] = 'False'\n if 'linewidth' not in plotSettings:\n plotSettings['linewidth'] = '0'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n xig, yig, zi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.zValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n facecolors=matplotlib.cm.get_cmap(name=plotSettings['cmap'])(ma.masked_where(np.isnan(Ci), Ci)),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n if first:\n self.actPlot.cmap = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n if 'color' in plotSettings.get('attributes', {}):\n self.actPlot.set_color = plotSettings.get('attributes', {})['color']\n else:\n self.actPlot.set_color = 'blue'\n else:\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n ########################\n # TRI-SURFACE PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'tri-surface':\n if self.dim == 2:\n self.raiseAWarning('TRI-surface Plot is NOT available for 2D plots, it is 3D!')\n return\n else:\n if 'color' not in plotSettings:\n plotSettings['color'] = 'b'\n if 'shade' not in plotSettings:\n plotSettings['shade'] = 'False'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, 
so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n metric = (self.xValues[pltIndex][key][xIndex] ** 2 + self.yValues[pltIndex][key][yIndex] ** 2) ** 0.5\n metricIndeces = np.argsort(metric)\n xs = np.zeros(self.xValues[pltIndex][key][xIndex].shape)\n ys = np.zeros(self.yValues[pltIndex][key][yIndex].shape)\n zs = np.zeros(self.zValues[pltIndex][key][zIndex].shape)\n for sindex in range(len(metricIndeces)):\n xs[sindex] = self.xValues[pltIndex][key][xIndex][metricIndeces[sindex]]\n ys[sindex] = self.yValues[pltIndex][key][yIndex][metricIndeces[sindex]]\n zs[sindex] = self.zValues[pltIndex][key][zIndex][metricIndeces[sindex]]\n surfacePlotOptions = {'color': plotSettings['color'],\n 'shade': ast.literal_eval(plotSettings['shade'])}\n surfacePlotOptions.update(plotSettings.get('attributes', {}))\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n surfacePlotOptions['cmap'] = matplotlib.cm.get_cmap(name = plotSettings['cmap'])\n self.actPlot = self.ax.plot_trisurf(xs, ys, zs, **surfacePlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actPlot.cmap = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] != 'None':\n surfacePlotOptions[\"cmap\"] = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n self.actPlot = self.ax.plot_trisurf(xs, ys, zs, **surfacePlotOptions)\n ########################\n # WIREFRAME PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'wireframe':\n if self.dim == 2:\n self.raiseAWarning('wireframe Plot is NOT available for 2D plots, IT IS A 3D!')\n return\n else:\n if 'rstride' not in plotSettings:\n plotSettings['rstride'] = '1'\n if 'cstride' not in plotSettings:\n plotSettings['cstride'] = '1'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n xig, yig, zi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.zValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n self.raiseAWarning(f'Currently, ax.plot_wireframe() in MatPlotLib version: {matplotlib.__version__} does not support a colormap! Wireframe plotted on a surface plot...')\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n alpha=0.4,\n rstride=ast.literal_eval(plotSettings['rstride']),\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n if 'color' in plotSettings.get('attributes', {}):\n self.actPlot.set_color = plotSettings.get('attributes', {})['color']\n else:\n self.actPlot.set_color = 'blue'\n else:\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n ########################\n # CONTOUR PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'contour' or self.outStreamTypes[pltIndex] == 'filledContour':\n if self.dim == 2:\n if 'numberBins' in plotSettings:\n nbins = int(plotSettings['numberBins'])\n else:\n nbins = 5\n for key in self.xValues[pltIndex]:\n if not self.colorMapCoordinates:\n self.raiseAWarning(self.outStreamTypes[pltIndex] + ' Plot needs coordinates for color map... 
Returning without plotting')\n return\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.actcm:\n first = False\n else:\n first = True\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.outStreamTypes[pltIndex] == 'contour':\n if plotSettings['cmap'] == 'None':\n if 'color' in plotSettings.get('attributes', {}):\n color = plotSettings.get('attributes', {})['color']\n else:\n color = 'blue'\n self.actPlot = self.ax.contour(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n colors=color,\n **plotSettings.get('attributes', {}))\n else:\n self.actPlot = self.ax.contour(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n **plotSettings.get('attributes', {}))\n else:\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.contourf(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n **plotSettings.get('attributes', {}))\n self.ax.clabel(self.actPlot, inline=1, fontsize=10)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actcm = self.fig.colorbar(self.actPlot, shrink=0.8, extend='both')\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax = max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n self.raiseAWarning('contour/filledContour is a 2-D plot, where x,y are the surface coordinates and colorMap vector is the array to visualize!\\n contour3D/filledContour3D are 3-D! ')\n return\n # These should be combined: ^^^ & vvv\n elif self.outStreamTypes[pltIndex] == 'contour3D' or self.outStreamTypes[pltIndex] == 'filledContour3D':\n if self.dim == 2:\n self.raiseAWarning('contour3D/filledContour3D Plot is NOT available for 2D plots, IT IS A 2D! Check \"contour/filledContour\"!')\n return\n else:\n if 'numberBins' in plotSettings:\n nbins = int(plotSettings['numberBins'])\n else:\n nbins = 5\n if 'extend3D' in plotSettings:\n ext3D = bool(plotSettings['extend3D'])\n else:\n ext3D = False\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.actcm:\n first = False\n else:\n first = True\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.outStreamTypes[pltIndex] == 'contour3D':\n if plotSettings['cmap'] == 'None':\n if 'color' in plotSettings.get('attributes', {}):\n color = plotSettings.get('attributes', {})['color']\n else:\n color = 'blue'\n self.actPlot = self.ax.contour3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n colors=color,\n extend3d=ext3D,\n **plotSettings.get('attributes', {}))\n else:\n self.actPlot = self.ax.contour3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n extend3d=ext3D,\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n else:\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.contourf3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n self.ax.clabel(self.actPlot, inline=1, fontsize=10)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actcm = self.fig.colorbar(self.actPlot, shrink = 0.8, extend = 'both')\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax = max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n ########################\n # DataMining PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'dataMining':\n colors = cycle(['#88CCEE', '#DDCC77', '#AA4499', '#117733', '#332288', '#999933', '#44AA99', '#882255', '#CC6677', '#CD6677', '#DC6877', '#886677', '#AA6677', '#556677', '#CD7865'])\n if 's' not in plotSettings:\n plotSettings['s'] = '20'\n if 'c' not in plotSettings:\n plotSettings['c'] = 'b'\n if 'marker' not in plotSettings:\n plotSettings['marker'] = 'o'\n if 'alpha' not in plotSettings:\n plotSettings['alpha'] = 'None'\n if 'linewidths' not in plotSettings:\n plotSettings['linewidths'] = 'None'\n clusterDict[pltIndex] = {}\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n dataMiningPlotOptions = {'s': ast.literal_eval(plotSettings['s']),\n 'marker': (plotSettings['marker']),\n 'alpha': ast.literal_eval(plotSettings['alpha']),\n 'linewidths': ast.literal_eval(plotSettings['linewidths'])}\n if self.colorMapCoordinates[pltIndex] is not None:\n self.raiseAWarning('ColorMap values supplied, however DataMining plots do not use colorMap from input.')\n if plotSettings['cmap'] == 'None':\n self.raiseAWarning('ColorSet supplied, however DataMining plots do not use color set from input.')\n if 'cluster' == plotSettings['SKLtype']:\n # TODO: include the cluster Centers to the plot\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = 
np.amax(self.clusterValues[pltIndex][1][0]) + 1\n dataMiningPlotOptions.update(plotSettings.get('attributes', {}))\n if self.dim == 2:\n clusterDict[pltIndex]['clusterValues'] = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n clusterDict[pltIndex]['clusterValues'] = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 3))\n clusterDict[pltIndex]['clusterValues'][:, 0] = self.xValues[pltIndex][key][xIndex]\n clusterDict[pltIndex]['clusterValues'][:, 1] = self.yValues[pltIndex][key][yIndex]\n if self.dim == 2:\n for k, col in zip(range(int(clusterDict[pltIndex]['noClusters'])), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n\n # Handle all of the outlying data\n myMembers = self.clusterValues[pltIndex][1][0] == -1\n # resize the points\n dataMiningPlotOptions['s'] /= 2\n # and hollow out their markers\n if 'facecolors' in dataMiningPlotOptions:\n faceColors = dataMiningPlotOptions['facecolors']\n else:\n faceColors = None\n dataMiningPlotOptions['facecolors'] = 'none'\n\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n color='#000000',\n **dataMiningPlotOptions)\n\n # Restore the plot options to their original values\n dataMiningPlotOptions['s'] *= 2\n if faceColors is not None:\n dataMiningPlotOptions['facecolors'] = faceColors\n else:\n del dataMiningPlotOptions['facecolors']\n\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n clusterDict[pltIndex]['clusterValues'][:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(int(clusterDict[pltIndex]['noClusters'])), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n clusterDict[pltIndex]['clusterValues'][myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n\n # Handle all of the outlying data\n myMembers = self.clusterValues[pltIndex][1][0] == -1\n # resize the points\n dataMiningPlotOptions['s'] /= 2\n # and hollow out their markers\n if 'facecolors' in dataMiningPlotOptions:\n faceColors = dataMiningPlotOptions['facecolors']\n else:\n faceColors = None\n dataMiningPlotOptions['facecolors'] = 'none'\n\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n clusterDict[pltIndex]['clusterValues'][myMembers, 2],\n color='#000000',\n **dataMiningPlotOptions)\n\n # Restore the plot options to their original values\n dataMiningPlotOptions['s'] *= 2\n if faceColors is not None:\n dataMiningPlotOptions['facecolors'] = faceColors\n else:\n del dataMiningPlotOptions['facecolors']\n\n elif 'bicluster' == plotSettings['SKLtype']:\n self.raiseAnError(IOError, 'SKLType Bi-Cluster Plots are not implemented yet!..')\n elif 'mixture' == plotSettings['SKLtype']:\n if 'noMixtures' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noMixtures'] = int(plotSettings.get('attributes', {})['noMixtures'])\n plotSettings.get('attributes', {}).pop('noMixtures')\n else:\n clusterDict[pltIndex]['noMixtures'] = np.amax(self.mixtureValues[pltIndex][1][0]) + 1\n if self.dim == 3:\n self.raiseAnError(IOError, 'SKLType Mixture Plots are only available in 2-Dimensions')\n 
else:\n clusterDict[pltIndex]['mixtureValues'] = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 2))\n clusterDict[pltIndex]['mixtureValues'][:, 0] = self.xValues[pltIndex][key][xIndex]\n clusterDict[pltIndex]['mixtureValues'][:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'mixtureCovars' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('mixtureCovars', (pltIndex, 0))\n # mixtureCovars = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('mixtureCovars')\n # else:\n # mixtureCovars = None\n if 'mixtureMeans' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('mixtureMeans', (pltIndex, 0))\n # mixtureMeans = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('mixtureMeans')\n # else:\n # mixtureMeans = None\n # mixtureCovars.reshape(3, 4)\n # mixtureMeans.reshape(3, 4)\n # for i, (mean, covar, col) in enumerate(zip(mixtureMeans, mixtureCovars, colors)):\n for i, col in zip(range(clusterDict[pltIndex]['noMixtures']), colors):\n if not np.any(self.mixtureValues[pltIndex][1][0] == i):\n continue\n myMembers = self.mixtureValues[pltIndex][1][0] == i\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['mixtureValues'][myMembers, 0],\n clusterDict[pltIndex]['mixtureValues'][myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n elif 'manifold' == plotSettings['SKLtype']:\n if self.dim == 2:\n manifoldValues = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n manifoldValues = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 3))\n manifoldValues[:, 0] = self.xValues[pltIndex][key][xIndex]\n manifoldValues[:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'clusterLabels' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('clusterLabels', (pltIndex, 0))\n clusterDict[pltIndex]['clusterLabels'] = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('clusterLabels')\n else:\n clusterDict[pltIndex]['clusterLabels'] = None\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = np.amax(self.clusterValues[pltIndex][1][0]) + 1\n if self.clusterValues[pltIndex][1][0] is not None:\n if self.dim == 2:\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(manifoldValues[myMembers, 0],\n manifoldValues[myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n manifoldValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(manifoldValues[myMembers, 0],\n manifoldValues[myMembers, 1],\n manifoldValues[myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n else:\n if self.dim == 2:\n self.actPlot = self.ax.scatter(manifoldValues[:, 0],\n manifoldValues[:, 1],\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n manifoldValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n self.actPlot = self.ax.scatter(manifoldValues[:, 0],\n manifoldValues[:, 1],\n 
manifoldValues[:, 2],\n **dataMiningPlotOptions)\n elif 'decomposition' == plotSettings['SKLtype']:\n if self.dim == 2:\n decompositionValues = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n decompositionValues = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 3))\n decompositionValues[:, 0] = self.xValues[pltIndex][key][xIndex]\n decompositionValues[:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = np.amax(self.clusterValues[pltIndex][1][0]) + 1\n if self.clusterValues[pltIndex][1][0] is not None:\n if self.dim == 2:\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(decompositionValues[myMembers, 0],\n decompositionValues[myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n decompositionValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(decompositionValues[myMembers, 0],\n decompositionValues[myMembers, 1],\n decompositionValues[myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n else:\n # no ClusterLabels\n if self.dim == 2:\n self.actPlot = self.ax.scatter(decompositionValues[:, 0],\n decompositionValues[:, 1],\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n decompositionValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n self.actPlot = self.ax.scatter(decompositionValues[:, 0],\n decompositionValues[:, 1],\n decompositionValues[:, 2],\n **dataMiningPlotOptions)\n else:\n # Let's try to \"write\" the code for the plot on the fly\n self.raiseAWarning('Trying to create a non-predefined plot of type ' + self.outStreamTypes[pltIndex] + '. If this fails, please refer to the and/or the related matplotlib method specification.')\n kwargs = {}\n for kk in plotSettings:\n if kk != 'attributes' and kk != self.outStreamTypes[pltIndex]:\n try:\n kwargs[kk] = ast.literal_eval(plotSettings[kk])\n except ValueError:\n kwargs[kk] = plotSettings[kk]\n try:\n if self.dim == 2:\n customFunctionCall = getattr(self.ax, self.outStreamTypes[pltIndex])\n else:\n customFunctionCall = getattr(self.ax, self.outStreamTypes[pltIndex])\n self.actPlot = customFunctionCall(**kwargs)\n except AttributeError as ae:\n self.raiseAnError(RuntimeError, '<' + str(ae) + '> -> in execution custom plot \"' + self.outStreamTypes[pltIndex] + '\" in Plot ' + self.name + '.\\nSTREAM MANAGER: ERROR -> command has been called in the following way: ' + 'ax.' 
+ self.outStreamTypes[pltIndex])\n\n if 'legend' in self.options['plotSettings']:\n self.fig.legend(**self.options['plotSettings']['legend'])\n\n # SHOW THE PICTURE\n self.__executeActions()\n self.fig.canvas.draw_idle()\n\n if 'screen' in self.destinations and display:\n def handle_close(event):\n \"\"\"\n This method is aimed to handle the closing of figures (overall when in interactive mode)\n @ In, event, instance, the event to close\n @ Out, None\n \"\"\"\n self.fig.canvas.stop_event_loop()\n self.raiseAMessage('Closed Figure')\n self.fig.canvas.mpl_connect('close_event', handle_close)\n # self.plt.pause(1e-6)\n # The following code is extracted from pyplot.pause without actually\n # needing to force the code to sleep, according to MPL's documentation,\n # this feature is experimental, hopefully by not calling the pause\n # function, we can obtain consistent results.\n # We are skipping a few of the sanity checks done in that function,\n # since we are sure we have an interactive backend and access to the\n # correct type of canvas and figure.\n self.fig.canvas.draw()\n # If your graphs are unresponsive to user input, you may want to consider\n # adjusting this timeout, to allow more time for the input to be handled.\n self.fig.canvas.start_event_loop(1e-3)\n\n # self.fig.canvas.flush_events()\n\n for fileType in self.destinations:\n if fileType == 'screen':\n continue\n\n if not self.overwrite:\n prefix = str(self.counter) + '-'\n else:\n prefix = ''\n\n if len(self.filename) > 0:\n name = self.filename\n else:\n name = prefix + self.name + '_' + str(self.outStreamTypes).replace(\"'\", \"\").replace(\"[\", \"\").replace(\"]\", \"\").replace(\",\", \"-\").replace(\" \", \"\")\n\n if self.subDirectory is not None:\n name = os.path.join(self.subDirectory,name)\n\n self.fig.savefig(name + '.' + fileType, format=fileType)\n\n if 'screen' not in self.destinations:\n plt.close(fig=self.fig)\n\n gc.collect()",
"def loadData(self):\r\n self.samplerate = self.app.samplerate\r\n self.sensors = self.app.sensors\r\n self.sensorMask = self.app.sensorMask\r\n self.measurements = self.app.measurements\r\n\r\n # Get min and max data points\r\n for sens in self.sensor_ids:\r\n try:\r\n for i in range(1,self.measurements):\r\n if float(self.app.data[i][sens].text) < self.sensor_range[0]:\r\n self.sensor_range[0] = float(self.app.data[i][sens].text)\r\n elif float(self.app.data[i][sens].text) > self.sensor_range[1]:\r\n self.sensor_range[1] = float(self.app.data[i][sens].text)\r\n except:\r\n print(self.app.data)\r\n \r\n # Set x scale from 0 to end of track\r\n self.scalex = [0,self.measurements]\r\n## self.scalex = [0,self.w/2]\r\n # Set y scale to maximum sensor measurement\r\n self.setScaleY(self.sensor_range[0], self.sensor_range[1])",
"def force_show(sub_Idx):\n force_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/forces/force_' + f'{sub_Idx:02d}' + '.txt'\n image_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/images/'\n force_num = len(glob.glob(image_path + '*.jpg'))\n force_list = load_force_txt(force_path,force_num)\n print('showing '+f'{force_num:03d}'+ ' raw forces for subject ' + f'{sub_Idx:02d}')\n\n fig = plt.figure(figsize = (10, 7)) \n ax = plt.axes(projection =\"3d\") \n\n for x, y, z in force_list:\n ax.scatter3D(x, y, z, color = \"green\")\n ax.set_xlabel('X-axis', fontweight ='bold') \n ax.set_ylabel('Y-axis', fontweight ='bold') \n ax.set_zlabel('Z-axis', fontweight ='bold')\n plt.title(\"3D force data\") \n plt.show()"
] | [
"0.72387475",
"0.60290945",
"0.6005947",
"0.5955035",
"0.58564967",
"0.58080417",
"0.58020985",
"0.57991344",
"0.5739967",
"0.5708146",
"0.5702322",
"0.5696365",
"0.5665857",
"0.56347615",
"0.5624619",
"0.56230587",
"0.5581395",
"0.5572585",
"0.55696213",
"0.5556687",
"0.55472773",
"0.5542616",
"0.55224085",
"0.5511036",
"0.55051017",
"0.54762614",
"0.5469076",
"0.5468142",
"0.54540175",
"0.54379845"
] | 0.6846585 | 1 |
Returns a dictionary of RefSeq genes (by chromosome and strand with 'name' parameter as key) from UCSC genome browser (equivalent to RefSeq ID) | def fetchRefSeq(genome = 'hg18',lookupval = 'name'):
cursor=gbdbConnect(gbdbname=genome)
select="SELECT * FROM refGene"
cursor.execute(select)
rows=cursor.fetchall()
output={}
for chr in genomelib.chr_names:
output[chr]={}
output[chr]['+']={}
output[chr]['-']={}
for row in rows:
if row['chrom'] in genomelib.chr_names:
output[row['chrom']][row['strand']][row[lookupval]]=row
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetchRefSeqByChrom(RefSeqBed=\"/fg/compbio-t/lgoff/magda/references/human/transcriptome/hg18/hg18_RefSeq.bed\"):\n res = {}\n iter = parseBed(RefSeqBed)\n for i in iter:\n res.setdefault(i.chr,{})\n res[i.chr].setdefault(i.strand,[])\n res[i.chr][i.strand].append(i)\n return res",
"def fetchRefSeqDict(RefSeqBed=\"/fg/compbio-t/lgoff/magda/references/human/transcriptome/hg18/hg18_RefSeq.bed\"):\n res = {}\n iter = parseBed(RefSeqBed)\n for i in iter:\n res[i.name] = i\n return res",
"def load_gene_dict(reference_genbank_name=\"data/covid-19-genbank.gb\"):\n recs = [rec for rec in SeqIO.parse(reference_genbank_name, \"genbank\")]\n gene_dict = {}\n for rec in recs:\n feats = [feat for feat in rec.features if feat.type == \"CDS\"]\n for feat in feats:\n content = '{}: {}'.format(feat.qualifiers['protein_id'][0], feat.qualifiers['product'][0])\n if feat.qualifiers['product'][0] == 'ORF1a polyprotein':\n continue\n if feat.location_operator == 'join':\n for item in feat.location.parts:\n key = (item.start.position, item.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n else:\n key = (feat.location.start.position, feat.location.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n return gene_dict",
"def gather_strand_by_geneID_dict(genome_gtf):\n strand_by_geneID_dict = {}\n with open(genome_gtf) as f: \n for line in f: \n current_line = line.split('\\t')\n if current_line[2] == \"CDS\":\n current_orf = current_line[8].split(';')[2].split()[1].strip('\\\"')\n current_strand = current_line[6]\n strand_by_geneID_dict[current_orf] = current_strand\n return strand_by_geneID_dict",
"def get_reference_seq_ucsc(chrom, start, end):\n if chrom.startswith('chr'):\n chrom = chrom.replace('chr', '')\n request = 'http://genome.ucsc.edu/cgi-bin/das/hg19/dna?segment=chr{}:{},{}'.format(chrom, start, end)\n try:\n dna = xmltodict.parse(urlopen(request).read())['DASDNA']['SEQUENCE']['DNA']['#text'].replace('\\n', '')\n except (URLError, ExpatError) as e:\n print('Could not open UCSC url. Please check your internet connection.\\n{}\\n{}'.format(request, e.message))\n dna = \"n\" * (start - end)\n return dna",
"def get_chromosome_names(bam):\n\n # ref_tid = {str(name): int(bam.get_tid(name)) + 1 for name in bam.get_reference_name}\n\n ref_tid = {} # 'I' | 0, 'II' | 1, ...\n for i in range(bam.nreferences): #if bam.nreferences does not work, use range(17) #16 chromosomes and the mitochondrial chromosome\n ref_name = bam.get_reference_name(i)\n ref_tid[ref_name] = bam.get_tid(ref_name)\n\n return ref_tid",
"def _chrom_names(fasta_file):\n from pysam import FastaFile\n with FastaFile(fasta_file) as fa:\n chroms = list(fa.references)\n return chroms",
"def getseq(genomefasta):\n genomedict = {}\n for i in SeqIO.parse(open(genomefasta), \"fasta\"):\n genomedict[i.id] = str(i.seq)\n return genomedict",
"def GenomeReader(GenomeFile):\n GenomeScaffolds = {}\n key = []\n with open(GenomeFile, 'r') as f:\n for line in f:\n line = line.strip()\n if line.startswith(\">\"):\n NamedSeq = line.replace('>', '')\n key.append(NamedSeq)\n GenomeScaffolds[NamedSeq] = \"\"\n else:\n GenomeScaffolds[NamedSeq] += line\n return GenomeScaffolds # Returns a Dictionary object",
"def get_reference_seq(chrom, start, end, seq_dict=None):\n if not seq_dict:\n return get_reference_seq_ucsc(chrom, start, end)\n # ex. start = 1, end = 3, return [0,1,2]\n # because computer scientists are exclusive starting from 0 but biologists are inclusive starting from 1\n start = int(start) - 1\n try:\n dna = str(seq_dict[chrom][start:end].seq)\n except IndexError as e:\n raise Exception(\"Error: Could not find that sequence: %s\" % str(e))\n except KeyError as e:\n print(\"No chromosome named: %s\\nTrying UCSC...\" % str(e))\n dna = get_reference_seq(chrom, start, end)\n return dna.lower()",
"def get_chromosome_object(agp):\n\n chr = {}\n\n agp = agp.split('\\n')\n\n for i, line in enumerate(agp):\n if len(line) == 0 or line[0] == '#':\n continue\n tabs = line.split(\"\\t\")\n acc = tabs[0]\n start = int(tabs[1])\n stop = int(tabs[2])\n comp_type = tabs[6]\n if 'acc' not in chr:\n chr['accession'] = acc\n chr['type'] = 'nuclear'\n if comp_type == 'centromere':\n chr['centromere'] = {\n 'start': start,\n 'length': stop - start\n }\n if i == len(agp) - 2:\n chr['length'] = stop\n return chr",
"def GetGeneName(arg):\n\n genbank = ChromUnzip(arg)\n \n p1=re.compile(r'(?:ACCESSION\\s+)(\\w+\\d+)')\n p6=re.compile(r'(?:/gene=\")(.+?)(?:\"\\s+)')\n\n gene_name_dict={}\n \n for entry in genbank:\n gene_list=[] \n gene_it_6=p6.finditer(entry)\n gene_it_1=p1.finditer(entry) \n for hit in gene_it_6:\n gene_list.append(hit.group(1))\n for item in gene_it_1:\n gene_name_dict[item.group(1)]=gene_list[0]\n \n return gene_name_dict",
"def get_genome_hgvs(genome_build, ref_seq, cdna_hgvs):\n query = \"{refseq}:{cdna}\".format(refseq=ref_seq, cdna=cdna_hgvs)\n url = \"https://mutalyzer.nl/services/?wsdl\"\n\n client = Client(url, cache=None)\n response = client.service.numberConversion(genome_build, query)\n find_hgvs = re.search(\"\\\".*\\\"\", str(response))\n if find_hgvs:\n genome_hgvs = find_hgvs.group(0)[1:-1]\n else:\n genome_hgvs = \"\"\n\n return genome_hgvs",
"def _get_references_by_species(self) -> dict:\n fasta_parser = FastaParser()\n references_by_species = {}\n for (\n species,\n reference_files,\n ) in self._pathcreator.ref_seq_paths_by_species.items():\n references_by_species[species] = []\n for reference_file in reference_files:\n with open(reference_file, \"r\") as fasta_fh:\n for header, sequence in fasta_parser.entries(fasta_fh):\n header_id = fasta_parser.header_id(header)\n references_by_species[species].append(header_id)\n return references_by_species",
"def hostRefSeq(chr,start,end,strand):\n cursor=gbdbConnect()\n selSQL=\"SELECT * from refGene WHERE chrom='%s' AND txStart<='%d' AND txEnd>='%d'\" % (chr,int(start),int(end))\n cursor.execute(selSQL)\n rows=cursor.fetchall()\n results=[]\n if cursor.rowcount==0:\n return False\n else:\n for row in rows:\n results.append(row)\n return results",
"def get_fastg_seqs_dict(fastg_name, G):\n fp = open(fastg_name, 'r')\n seqs = {}\n for name,seq,qual in readfq(fp):\n name_parts = re.sub('[:,]',\" \", name[:-1]).split()\n node = name_parts[0]\n seqs[node] = seq\n return seqs",
"def get_gc(request, genome, chrom, start, end):\n logger.debug(\"annotation_server.get_gc called for genome: %s chromosome: %s:%s-%s\" % (genome, chrom, start, end)) \n \n if genome in SUPPORTED_GENOMES:\n current_table = eval(genome+ \"_GC\")\n curr_vals = current_table.objects.filter(\n Q(chrom__iexact=chrom),\n Q(position__range=(start, end)),\n ).values('chrom', 'position', 'value')\n data = ValuesQuerySetToDict(curr_vals)\n return HttpResponse(data, 'application/json')\n else:\n return HttpResponse(status=400)",
"def find_GC_content(fasta_file_name):\n\twith open(fasta_file_name) as fasta:\n\t\tGC_content = {}\n\t\tfor line in fasta:\n\n\t\t\t# Each line (bar the last) ends with '\\n'\n\t\t\tloc_line = line.replace('\\n', '')\n\n\t\t\t# Finds '>' at opening of line (FASTA seq title)\n\t\t\tif re.match(r'^>', loc_line):\n\t\t\t\tGC_content[loc_line] = 0\n\t\t\t\tG_count = 0\n\t\t\t\tC_count = 0\n\t\t\t\tcount = 0\n\t\t\t\tcurrent = loc_line\n\t\t\telse:\n\t\t\t\tG_count += loc_line.count('G')\n\t\t\t\tC_count += loc_line.count('C')\n\t\t\t\tcount += len(loc_line)\n\t\t\t\tGC_content[current] = float((G_count + C_count)) / count\n\treturn GC_content",
"def refseqTSS():\n refSeqs=fetchRefSeq()\n output={}\n for chr in genomelib.chr_names:\n output[chr]=[]\n for strand in ['+','-']:\n for k in refSeqs[chr][strand]:\n v=refSeqs[chr][strand][k]\n if v['strand'] == \"+\":\n tss=v['txStart']\n elif v['strand'] == \"-\":\n tss=v['txEnd']\n tssInfo=(v['name'],v['chrom'],int(tss),v['strand'])\n output[chr].append(tssInfo)\n output[chr].sort(lambda x,y:cmp(x[2],y[2]))\n return output",
"def get_ref_seq_dict(ref_seq):\n return SeqIO.to_dict(SeqIO.parse(ref_seq, 'fasta')) if ref_seq else None",
"def chrDIC(df):\n chr_names=df['chr'].unique().tolist()\n d_chr = d_gtf_chr = {chrom : df[df[\"chr\"]==chrom] for chrom in chr_names}\n return d_chr",
"def __init__(self):\n self.chroms = {}\n self.chroms_list = []",
"def dumpchroms(release, output_path):\n global account, chnames\n if not os.path.isdir(output_path):\n raise ValueError(\"Create the output dir first\")\n \n runlog_path = os.path.join(output_path, 'dump_genome.log')\n LOGGER.log_file_path = runlog_path\n \n human = Genome(Species='human', Release=release, account=account)\n for i,n in enumerate(chnames, start=1):\n nx = \"Chr%02d_%s\" % (i, n) + \".fa\"\n print \"dumping chromosome file\", nx\n print \" fetching ...\"\n sys.stdout.flush()\n ch = human.getRegion(CoordName=n)\n ch.Seq.Name = \"Chr\" + n\n print \" dumping ...\"\n sys.stdout.flush()\n\n # output to temporary file name ... then rename once output completes\n # .. safer when working with large files.\n xoutpath = os.path.join(output_path, \"z-\"+nx)\n outpath = os.path.join(output_path, nx)\n with open(xoutpath, \"w\") as dst:\n dst.write(ch.Seq.toFasta()+\"\\n\")\n os.rename(xoutpath, outpath)\n LOGGER.output_file(outpath)\n \n print \"Done.\"\n sys.stdout.flush()\n return",
"def get_genes(request, genome, chrom, start, end):\n logger.debug(\"annotation_server.get_genes called for genome: %s chromosome: %s\" % (genome, chrom)) \n \n if genome in SUPPORTED_GENOMES:\n current_table = eval(genome+ \"_EnsGene\")\n curr_vals = current_table.objects.filter(\n Q(chrom__iexact=chrom),\n Q(cdsStart__range=(start, end)) | Q(cdsEnd__range=(start, end))\n ).values('name', 'chrom', 'strand', 'txStart', 'txEnd', 'cdsStart', 'cdsEnd', 'exonCount', 'exonStarts', 'exonEnds')\n data = ValuesQuerySetToDict(curr_vals)\n return HttpResponse(data, 'application/json')\n else:\n return HttpResponse(status=400)\n \n \n # Postbio query\n #cursor = connection.cursor() \n #query = \"\"\"SELECT x.symbol, r.name, #<r.region as start, #>r.region as end, case when r.same_orient then '+' else '-' end as strand, #<r.cds as cds_start, #>r.cds as cds_end from dm3.flybase r join dm3.flyBase2004Xref x on r.name = x.name JOIN (select id, name from dm3.sequence where name = '%s') n ON n.id = r.seq_id and region && int_interval '(%s,%s)' order by region\"\"\" % (chrom, start, end)\n #cursor.execute(query) \n #return HttpResponse(cursor_to_json(cursor), 'application/javascript')",
"def gene_descriptors(civic_gid19):\n return [civic_gid19]",
"def _parse_genes(chrom: str, db: FeatureDB) -> List[Dict]:\n parsed_genes = []\n for gene in db.region(\n seqid=chrom, featuretype=[GFF3GeneFeatureTypes.GENE.value, GFF3GeneFeatureTypes.PSEUDOGENE.value]\n ):\n gene_id = gene.attributes.get(\"gene_id\", [None])[0]\n locus_tag = gene.attributes.get(\"locus_tag\", [None])[0]\n gene_symbol = gene.attributes.get(\"gene_name\", [gene.attributes.get(\"gene_symbol\", None)])[0]\n gene_biotype = gene.attributes.get(\"gene_biotype\", [gene.attributes.get(\"gene_type\", None)])[0]\n gene_qualifiers = {x: y for x, y in gene.attributes.items() if not BioCantorGFF3ReservedQualifiers.has_value(x)}\n\n if Biotype.has_name(gene_biotype):\n gene_biotype = Biotype[gene_biotype]\n elif gene_biotype:\n gene_qualifiers[\"provided_biotype\"] = [gene_biotype]\n gene_biotype = None\n\n transcripts = []\n for i, transcript in enumerate(db.children(gene, level=1)):\n\n transcript_id = transcript.attributes.get(\"transcript_id\", [None])[0]\n transcript_symbol = transcript.attributes.get(\n \"transcript_name\", [gene.attributes.get(\"transcript_name\", None)]\n )[0]\n transcript_qualifiers = {\n x: y for x, y in transcript.attributes.items() if not BioCantorGFF3ReservedQualifiers.has_value(x)\n }\n provided_transcript_biotype = gene.attributes.get(\n \"transcript_biotype\", [gene.attributes.get(\"transcript_type\", None)]\n )[0]\n\n if Biotype.has_name(provided_transcript_biotype):\n transcript_biotype = Biotype[provided_transcript_biotype]\n else:\n # keep track of what they gave us, that did not match the enum\n if provided_transcript_biotype:\n transcript_qualifiers[\"provided_transcript_biotype\"] = provided_transcript_biotype\n # use the gene biotype\n transcript_biotype = gene_biotype\n\n if locus_tag is not None:\n if transcript_id is None:\n transcript_id = locus_tag\n if transcript_symbol is None:\n transcript_symbol = locus_tag\n\n exons = []\n cds = []\n for feature in db.children(transcript, level=1):\n if feature.featuretype == GFF3GeneFeatureTypes.EXON.value:\n exons.append(feature)\n elif feature.featuretype == GFF3GeneFeatureTypes.CDS.value:\n cds.append(feature)\n else:\n logger.warning(f\"Found non CDS/exon child of transcript in feature: {feature}\")\n\n # This gene has only a CDS/exon feature as its direct child\n # therefore, we really have one interval here\n if len(exons) == 0:\n if transcript.featuretype not in [\n GFF3GeneFeatureTypes.CDS.value,\n GFF3GeneFeatureTypes.EXON.value,\n ]:\n logger.warning(f\"Gene child feature has type {transcript.featuretype}; skipping\")\n continue\n logger.info(f\"gene {gene_id} had no transcript feature\")\n if transcript.featuretype == GFF3GeneFeatureTypes.CDS.value:\n exons = cds = [transcript]\n else:\n exons = [transcript]\n\n exons = sorted(exons, key=lambda e: e.start)\n exon_starts = [x.start - 1 for x in exons]\n exon_ends = [x.end for x in exons]\n start = exon_starts[0]\n end = exon_ends[-1]\n assert start <= end\n strand = Strand.from_symbol(transcript.strand)\n\n if len(cds) == 0:\n cds_starts = cds_ends = cds_frames = None\n protein_id = product = None\n else:\n # sort by start and end in case two blocks start at the same position\n cds = sorted(cds, key=lambda c: (c.start, c.end))\n cds_starts = [x.start - 1 for x in cds]\n cds_ends = [x.end for x in cds]\n cds_frames = [CDSPhase.from_int(int(f.frame)).to_frame().name for f in cds]\n # NCBI encodes protein IDs and products on the CDS feature\n protein_id = cds[0].attributes.get(\"protein_id\", [None])[0]\n product = 
cds[0].attributes.get(\"product\", [None])[0]\n\n tx = dict(\n exon_starts=exon_starts,\n exon_ends=exon_ends,\n strand=strand.name,\n cds_starts=cds_starts,\n cds_ends=cds_ends,\n cds_frames=cds_frames,\n qualifiers=filter_and_sort_qualifiers(transcript_qualifiers),\n is_primary_tx=False,\n transcript_id=transcript_id,\n transcript_type=transcript_biotype.name if transcript_biotype else transcript_biotype,\n transcript_symbol=transcript_symbol,\n sequence_name=chrom,\n protein_id=protein_id,\n product=product,\n )\n transcripts.append(tx)\n\n if len(transcripts) == 0:\n # infer a transcript for a gene\n logger.info(f\"Inferring a transcript for gene {gene_symbol}\")\n tx = dict(\n exon_starts=[gene.start],\n exon_ends=[gene.end],\n strand=Strand.from_symbol(gene.strand).name,\n qualifiers=gene_qualifiers,\n transcript_type=gene_biotype.name if gene_biotype else gene_biotype,\n transcript_id=gene_id,\n sequence_name=gene.seqid,\n )\n transcripts.append(tx)\n\n gene = dict(\n transcripts=transcripts,\n gene_id=gene_id,\n gene_symbol=gene_symbol,\n locus_tag=locus_tag,\n gene_type=gene_biotype.name if gene_biotype else gene_biotype,\n qualifiers=filter_and_sort_qualifiers(gene_qualifiers),\n sequence_name=chrom,\n )\n\n parsed_genes.append(gene)\n return parsed_genes",
"def fetchRefSeqIntervalsIndexed(genome='hg18',proteinCodingOnly=False,verbose=False):\n cursor=gbdbConnect(gbdbname=genome)\n select=\"SELECT * FROM refGene\"\n if verbose:\n sys.stderr.write(\"Fetching RefSeq Sequences...\\n\")\n cursor.execute(select)\n rows=cursor.fetchall()\n output={}\n for chr in genomelib.chr_names:\n output[chr]={}\n output[chr]['+']=[]\n output[chr]['-']=[]\n if verbose:\n sys.stderr.write(\"Creating index by chr and strand...\\n\")\n \n for row in rows:\n if proteinCodingOnly and not row['name'].startswith('NM'):\n continue\n try:\n exonStarts = map(int,row['exonStarts'].rstrip().split(\",\")[:-1])\n exonEnds = map(int,row['exonEnds'].rstrip().split(\",\")[:-1])\n except:\n print \"\\t\".join([\"%s:%s\" % (k,v) for k,v in row.iteritems()])\n start = int(row['txStart'])\n exonOffsets = [x-start for x in exonStarts]\n exonLengths = []\n for i in xrange(len(exonStarts)):\n exonLengths.append(exonEnds[i]-exonStarts[i]+1)\n if row['chrom'] in genomelib.chr_names:\n output[row['chrom']][row['strand']].append(intervallib.SplicedInterval(row['chrom'],row['txStart'],row['txEnd'],row['strand'],\",\".join([str(x) for x in exonLengths]),\",\".join([str(x) for x in exonOffsets]),name=row['name2']))\n \n #Sort \n if verbose:\n sys.stderr.write(\"Sorting:\\n\")\n tstart = time.time()\n for key in output.keys():\n if verbose:\n sys.stderr.write(\"\\t%s\\t\" % key)\n output[key]['+'].sort()\n output[key]['-'].sort()\n tend = time.time()\n if verbose:\n sys.stderr.write('%0.2f sec\\n' % (tend-tstart))\n tstart = time.time()\n return output",
"def populate_crossreference_dictionary(self):\n master_crossreference_dictionary = dict()\n\n # If additional crossreferences need to be used to find interactors, they can be added here.\n # Use the crossreference prefix as the dictionary name.\n # Also add a regex entry to the resolve_identifier function.\n master_crossreference_dictionary['UniProtKB'] = dict()\n master_crossreference_dictionary['ENSEMBL'] = dict()\n master_crossreference_dictionary['NCBI_Gene'] = dict()\n master_crossreference_dictionary['RefSeq'] = dict()\n\n for key in master_crossreference_dictionary:\n self.logger.info('Querying for %s cross references.', key)\n with Neo4jHelper().run_single_parameter_query(self.query_xrefs, key) as result:\n for record in result:\n cross_ref_record = None\n # Modify the cross reference ID to match the PSI MITAB format if necessary.\n # So far, this is just converting 'NCBI_Gene' to 'entrez gene/locuslink'.\n if record['cr.globalCrossRefId'].startswith('NCBI_Gene'):\n cross_ref_record_split = record['cr.globalCrossRefId'].split(':')[1]\n cross_ref_record = 'entrez gene/locuslink:' + cross_ref_record_split\n else:\n cross_ref_record = record['cr.globalCrossRefId']\n\n # The crossreference dictionary is a list of genes\n # linked to a single crossreference.\n # Append the gene if the crossref dict entry exists.\n # Otherwise, create a list and append the entry.\n if cross_ref_record.lower() in master_crossreference_dictionary[key]:\n master_crossreference_dictionary[key][cross_ref_record.lower()].append(record['g.primaryKey'])\n else:\n master_crossreference_dictionary[key][cross_ref_record.lower()] = []\n master_crossreference_dictionary[key][cross_ref_record.lower()].append(record['g.primaryKey'])\n\n # The ids in PSI-MITAB files are lower case, hence the .lower() used above.\n\n return master_crossreference_dictionary",
"def get_ref_names(self) -> dict :\n return self._dc_names(self._ref_dc())",
"def locus2gene(scaflist, gbeglist, gendlist, gdatalist=False, gff=dbpaths['gff'], comprehensive=True ):\n cuffgenes = {}\n\n for result in range(len(scaflist)):\n if result % 1000 == 0:\n print \"%d genes matched of %d\" % (result, len(scaflist))\n cur_scaf = scaflist[result]\n cur_gbeg = gbeglist[result]\n cur_gend = gendlist[result]\n if gdatalist:\n cur_gdata = gdatalist[result]\n else:\n cur_gdata = 0\n fobj = open(gff, 'rb')\n for line in fobj:\n col = line.split()\n if col[2] == \"mRNA\":\n if col[0] == cur_scaf:\n if float(col[3]) <= cur_gend and float(col[4]) >= cur_gbeg:\n try:\n if (cur_scaf, cur_gbeg) in cuffgenes:\n cuffgenes[(cur_scaf, cur_gbeg, 2)] = (re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gdata)\n else:\n cuffgenes[(cur_scaf, cur_gbeg)] = (re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gdata)\n if not comprehensive:\n break\n except AttributeError:\n print col[8]\n fobj.close()\n\n return cuffgenes"
] | [
"0.7468378",
"0.6801201",
"0.66693723",
"0.6430852",
"0.6347451",
"0.6187618",
"0.61827475",
"0.604143",
"0.5910106",
"0.58944297",
"0.5880748",
"0.5769445",
"0.57518154",
"0.5728297",
"0.5714107",
"0.5687991",
"0.5635786",
"0.5633332",
"0.56206924",
"0.55868036",
"0.5560895",
"0.55580574",
"0.55500114",
"0.55480736",
"0.5528767",
"0.5523632",
"0.5498318",
"0.5498032",
"0.5494282",
"0.5478151"
] | 0.7134653 | 1 |
Returns a dictionary of RefSeq SplicedIntervals (by chromosome and strand) from UCSC table browser. Indexed lists are sorted prior to return for easy search. Same as fetchRefSeqIntervals but indexed by chrom and strand | def fetchRefSeqIntervalsIndexed(genome='hg18',proteinCodingOnly=False,verbose=False):
cursor=gbdbConnect(gbdbname=genome)
select="SELECT * FROM refGene"
if verbose:
sys.stderr.write("Fetching RefSeq Sequences...\n")
cursor.execute(select)
rows=cursor.fetchall()
output={}
for chr in genomelib.chr_names:
output[chr]={}
output[chr]['+']=[]
output[chr]['-']=[]
if verbose:
sys.stderr.write("Creating index by chr and strand...\n")
for row in rows:
if proteinCodingOnly and not row['name'].startswith('NM'):
continue
try:
exonStarts = map(int,row['exonStarts'].rstrip().split(",")[:-1])
exonEnds = map(int,row['exonEnds'].rstrip().split(",")[:-1])
except:
print "\t".join(["%s:%s" % (k,v) for k,v in row.iteritems()])
start = int(row['txStart'])
exonOffsets = [x-start for x in exonStarts]
exonLengths = []
for i in xrange(len(exonStarts)):
exonLengths.append(exonEnds[i]-exonStarts[i]+1)
if row['chrom'] in genomelib.chr_names:
output[row['chrom']][row['strand']].append(intervallib.SplicedInterval(row['chrom'],row['txStart'],row['txEnd'],row['strand'],",".join([str(x) for x in exonLengths]),",".join([str(x) for x in exonOffsets]),name=row['name2']))
#Sort
if verbose:
sys.stderr.write("Sorting:\n")
tstart = time.time()
for key in output.keys():
if verbose:
sys.stderr.write("\t%s\t" % key)
output[key]['+'].sort()
output[key]['-'].sort()
tend = time.time()
if verbose:
sys.stderr.write('%0.2f sec\n' % (tend-tstart))
tstart = time.time()
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetchRefSeqByChrom(RefSeqBed=\"/fg/compbio-t/lgoff/magda/references/human/transcriptome/hg18/hg18_RefSeq.bed\"):\n res = {}\n iter = parseBed(RefSeqBed)\n for i in iter:\n res.setdefault(i.chr,{})\n res[i.chr].setdefault(i.strand,[])\n res[i.chr][i.strand].append(i)\n return res",
"def get_inter_cds_regions(annotations):\n # Determine locations of inter-CDS regions for each chromosome\n inter_cds_regions = {}\n\n for chr_id, chromosome in annotations.items():\n # get chromosome dimensions (the first feature represents the\n # chromosome itself)\n ch_end = int(chromosome.features[0].location.end)\n\n # filter out everything except for genes\n genes = [x for x in chromosome.features if x.type == 'gene']\n\n # order by position along chromosome\n genes.sort(key=lambda x: x.location.start)\n\n # add range before first gene\n start = 0\n\n # keep track of strand of polycistronic transcriptional unit\n strand = None\n\n inter_cds_regions[chr_id] = {\n -1: [],\n +1: []\n }\n\n # iterate through genes and store the ranges between them;\n # for TriTrypDB files, the gene boundaries are generally the same\n # as the CDS boundaries.\n for gene in genes:\n # Determine location for the region up to start of the gene\n end = int(gene.location.start)\n\n # Skip over snoRNAs, etc. that are nested inside of other genes\n # For example: TcCLB TcChr22-2 179,000:180,000\n if end <= start:\n continue\n\n # Add CDS to relevant list based on strand\n if strand is None:\n # Left-most gene\n inter_cds_regions[chr_id][gene.location.strand].append((start, end))\n elif strand != gene.location.strand:\n # Add ORFs in both directions at transcription switch sites (TSSs)\n inter_cds_regions[chr_id][+1].append((start, end))\n inter_cds_regions[chr_id][-1].append((start, end))\n else:\n # Within PTU; look for ORFs on same strand\n inter_cds_regions[chr_id][strand].append((start, end))\n\n # update start counter and strand\n start = int(gene.location.end)\n strand = gene.location.strand\n\n # add region after last gene\n inter_cds_regions[chr_id][strand].append((start, ch_end))\n\n return inter_cds_regions",
"def refseqTSS():\n refSeqs=fetchRefSeq()\n output={}\n for chr in genomelib.chr_names:\n output[chr]=[]\n for strand in ['+','-']:\n for k in refSeqs[chr][strand]:\n v=refSeqs[chr][strand][k]\n if v['strand'] == \"+\":\n tss=v['txStart']\n elif v['strand'] == \"-\":\n tss=v['txEnd']\n tssInfo=(v['name'],v['chrom'],int(tss),v['strand'])\n output[chr].append(tssInfo)\n output[chr].sort(lambda x,y:cmp(x[2],y[2]))\n return output",
"def intervals_and_sources(self, chromosomes):\n num_intervals = sum([len(ints) for ints in chromosomes.itervalues()])\n intervals = np.empty(num_intervals, dtype=np.uint32)\n sources = np.empty(num_intervals, dtype=np.uint8)\n interval_num = 0\n for chromosome, interval_list in sorted(chromosomes.iteritems(), key=lambda x: x[0]):\n for species, end in interval_list:\n intervals[interval_num] = self.genome_index(chromosome, end)\n sources[interval_num] = species\n interval_num += 1\n return intervals, sources",
"def load_intervals_into_memory(self):\n self._intervals = dict()\n for chromosome in self.bw.chroms().keys():\n chromosome_intervals = []\n for start, end, score in self.bw.intervals(chromosome):\n interval = intervaltree.Interval(start, end, data=score)\n chromosome_intervals.append(interval)\n\n self._intervals[chromosome] = intervaltree.IntervalTree(chromosome_intervals)",
"def get_reference_seq_ucsc(chrom, start, end):\n if chrom.startswith('chr'):\n chrom = chrom.replace('chr', '')\n request = 'http://genome.ucsc.edu/cgi-bin/das/hg19/dna?segment=chr{}:{},{}'.format(chrom, start, end)\n try:\n dna = xmltodict.parse(urlopen(request).read())['DASDNA']['SEQUENCE']['DNA']['#text'].replace('\\n', '')\n except (URLError, ExpatError) as e:\n print('Could not open UCSC url. Please check your internet connection.\\n{}\\n{}'.format(request, e.message))\n dna = \"n\" * (start - end)\n return dna",
"def read_chr(fpath):\n\t# init dict and indices\n\tchrom_dicts={}\n\tstart=0\n\tindex=0\n\n\t# iterate through chromosome scores \n\tfor line in fileinput.input(fpath):\n\t\tx=line.split()\n\t\t\n\t\t# if chromosome skips some region, then normalize the previous window (<100 bp) and init new window \t\n\t\tif len(x)==4:\n\t\t\tif start in chrom_dicts:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\tstart=int(x[2].split(\"=\")[1])\n\t\t\tchrom_dicts[start]=0\n\t\t\tindex=0\n\n\t\t# if not a black region, then make news windows every 100 locations\n\t\tif len(x)==1:\n\t\t\tchrom_dicts[start]+=float(x[0])\n\t\t\tif index==100:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\t\tindex=0\n\t\t\t\tstart+=100\n\t\t\t\tchrom_dicts[start]=0\n\t\t\tindex+=1\n\t\n\t# track chromosomes that have been binned\n\tprint(\"%s %d\" % (fpath,len(chrom_dicts)))\n\treturn(chrom_dicts)",
"def fetchRefSeq(genome = 'hg18',lookupval = 'name'):\n cursor=gbdbConnect(gbdbname=genome)\n select=\"SELECT * FROM refGene\"\n cursor.execute(select)\n rows=cursor.fetchall()\n output={}\n for chr in genomelib.chr_names:\n output[chr]={}\n output[chr]['+']={}\n output[chr]['-']={}\n for row in rows:\n if row['chrom'] in genomelib.chr_names:\n output[row['chrom']][row['strand']][row[lookupval]]=row\n return output",
"def get_sample_region_coverage(sample_tabix_object, region_dict):\n\n coverage = []\n ## Iterate over the overlapping coverage intervals\n for i, coverage_line in enumerate(\n sample_tabix_object.fetch(\n region_dict[\"chrom\"], int(region_dict[\"start\"]), int(region_dict[\"end\"])\n )\n ):\n\n ## Create a dict for the current line\n cov_dict = dict(\n zip(\n [\"chrom\", \"start\", \"end\", \"coverage_value\"],\n coverage_line.strip().split(\"\\t\"),\n )\n )\n\n ## Add the coverage value for each position defined by the current interval\n coverage.extend(\n [\n int(cov_dict[\"coverage_value\"])\n for x in range(int(cov_dict[\"end\"]) - int(cov_dict[\"start\"]))\n ]\n )\n\n return coverage",
"def get_segments(cst):\n assert isinstance(cst, ChromStruct)\n\n # create a set of coordinates for the start and end of segments\n segs = np.load(cst.sg_files)['sg']\n end = np.cumsum(segs)\n start = np.concatenate(([0], end[:-1]))\n\n return np.column_stack((start, end)).astype(int)",
"def get_cds_regions(annotations):\n # Determine locations of CDS regions for each chromosome\n cds_regions = {}\n\n for chr_id, chromosome in annotations.items():\n # get chromosome dimensions (the first feature represents the\n # chromosome itself)\n ch_end = int(chromosome.features[0].location.end)\n\n # filter out everything except for genes\n genes = [x for x in chromosome.features if x.type == 'gene']\n\n # order by position along chromosome\n genes.sort(key=lambda x: x.location.start)\n\n # add range before first gene\n start = 0\n\n # keep track of strand of polycistronic transcriptional unit\n strand = None\n\n cds_regions[chr_id] = {\n -1: [],\n +1: []\n }\n\n # iterate over genes and add CDS coordinates\n for gene in genes:\n coords = (int(gene.location.start), int(gene.location.end))\n cds_regions[chr_id][gene.location.strand].append(coords)\n\n return cds_regions",
"def get_reference_seq(chrom, start, end, seq_dict=None):\n if not seq_dict:\n return get_reference_seq_ucsc(chrom, start, end)\n # ex. start = 1, end = 3, return [0,1,2]\n # because computer scientists are exclusive starting from 0 but biologists are inclusive starting from 1\n start = int(start) - 1\n try:\n dna = str(seq_dict[chrom][start:end].seq)\n except IndexError as e:\n raise Exception(\"Error: Could not find that sequence: %s\" % str(e))\n except KeyError as e:\n print(\"No chromosome named: %s\\nTrying UCSC...\" % str(e))\n dna = get_reference_seq(chrom, start, end)\n return dna.lower()",
"def hostRefSeq(chr,start,end,strand):\n cursor=gbdbConnect()\n selSQL=\"SELECT * from refGene WHERE chrom='%s' AND txStart<='%d' AND txEnd>='%d'\" % (chr,int(start),int(end))\n cursor.execute(selSQL)\n rows=cursor.fetchall()\n results=[]\n if cursor.rowcount==0:\n return False\n else:\n for row in rows:\n results.append(row)\n return results",
"def fetchRefSeqDict(RefSeqBed=\"/fg/compbio-t/lgoff/magda/references/human/transcriptome/hg18/hg18_RefSeq.bed\"):\n res = {}\n iter = parseBed(RefSeqBed)\n for i in iter:\n res[i.name] = i\n return res",
"def GetOccultationIntervals(self, occType, frontBody, frontShape, frontFrame, backBody, backShape, backFrame, abCorrection, s, e, useEntireIntvl, stepSize, numIntervals, starts, ends):\n return _gmat_py.EphemManager_GetOccultationIntervals(self, occType, frontBody, frontShape, frontFrame, backBody, backShape, backFrame, abCorrection, s, e, useEntireIntvl, stepSize, numIntervals, starts, ends)",
"def tr_nc_dict(dfin):\n\n\ttr_nc_index_dict = OrderedDict()\n\t\n\ttrCount = 0\n\tpreviousTrIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_tr=\"\"\n\n\tfor i in range(len(dfin)):\n# print dfin.loc[i]\n\n\t\tif dfin.loc[i,'feature'] == 'transcript':\n\t\t\ttrdict = parse_mod_entry(dfin.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\n\t\t\tif trCount != 0:\n\t\t\t\tnewTrIndex = i\n\t\t\t\ttr_nc_index_dict[current_id] = [previousTrIndex,newTrIndex]\n\t\t\t\tpreviousTrIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\ttrCount += 1\n\n\t\t\telse:\n\t\t\t\tnewTrIndex = 0\n\t\t\t\ttrCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\t\n\t\tif i == (len(dfin)-1):\n\t\t\tnewTrIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\ttr_nc_index_dict[current_id] = [previousTrIndex,newTrIndex]\n\t\t\t\n\treturn tr_nc_index_dict",
"def chrDIC(df):\n chr_names=df['chr'].unique().tolist()\n d_chr = d_gtf_chr = {chrom : df[df[\"chr\"]==chrom] for chrom in chr_names}\n return d_chr",
"def split_ref_seq(fdict, cloc, nthreads):\n if fdict is None:\n return [None for _ in range(nthreads)]\n ref_array = [{} for _ in range(nthreads)]\n for chrom in cloc:\n ref_array[cloc[chrom]][chrom] = fdict[chrom]\n return ref_array",
"def get_c2sIndsByRoi(rts, sopuids):\n \n # Get the ReferencedSOPInstanceUIDs from the ContourSequence by ROI:\n RSOPuidsByRoi = get_RSOPuidsByRoi(rts)\n \n c2sInds = []\n c2sIndsByRoi = []\n \n # Loop through each list of ReferencedSOPInstanceUIDs:\n for i in range(len(RSOPuidsByRoi)):\n inds = []\n \n for RefUid in RSOPuidsByRoi[i]:\n # Find the matching index of RefUid in sopuids:\n inds.append(sopuids.index(RefUid))\n \n c2sInds.extend(inds)\n c2sIndsByRoi.append(inds)\n \n return c2sIndsByRoi, c2sInds",
"def get_gbif_occs(self):\n\n # Create a file to store occurrence data.\n self.occfile = os.path.join(self.outdir, self.params['spname'].replace(\" \", \"_\") + \".csv\")\n\n # Get the usageKey for species of interest.\n self.key = species.name_backbone(name = self.params['spname'], rank = 'species')['usageKey']\n\n # Create latitude/longitude lists.\n self.lats = []\n self.lons = []\n\n # Run a while-loop to go through all observations.\n curr_offset = 0\n end_records = False\n while not end_records:\n occ_records = occ.search(taxonKey = self.key, hasCoordinate = True, \n decimalLatitude = ','.join([str(self.params['ymin']), str(self.params['ymax'])]),\n decimalLongitude = ','.join([str(self.params['xmin']), str(self.params['xmax'])]),\n offset = curr_offset\n )\n end_records = occ_records['endOfRecords']\n curr_offset += occ_records['limit']\n\n # Add latitude/longitude results to lists.\n self.lats.extend([i['decimalLatitude'] for i in occ_records['results']])\n self.lons.extend([i['decimalLongitude'] for i in occ_records['results']])\n\n # Print a dot on each cycle to show progress.\n print(\".\", end = \"\")\n\n # When end of data is reached: build pandas dataframe from lists and remove duplicate data points.\n if occ_records['endOfRecords']:\n df = pd.DataFrame({'Latitude': self.lats, 'Longitude': self.lons})\n df = df.drop_duplicates().reset_index()\n df = df.drop('index', axis = 1)\n\n # Reform the lists by subsetting the dataframe.\n self.lats = list(df['Latitude'])\n self.lons = list(df['Longitude'])\n\n # Print final number of records.\n print(f' Found {len(self.lats)} records.')\n\n # Build array to write to CSV file. np.vstack layers arrays vertically, where each layer is species-lat-lon. \n # np.repeat copies the species names as many times as there are entries. It also combines with zip() to put\n # a newline char at the end of each layer.\n csvarr = np.vstack([np.repeat(self.params['spname'].replace(\" \", \"_\"), len(self.lats)), self.lats,\n [\"{}{}\".format(a_, b_) for a_, b_ in zip(self.lons, np.repeat('\\n', len(self.lats)))]]\n ).T\n\n # Write array to CSV file.\n with open(self.occfile, 'w') as f:\n f.write('Species,Latitude,Longitude\\n')\n for line in csvarr:\n f.write(\",\".join(line))\n\n # Transform lists to arrays for downstream application.\n self.lats = np.array(self.lats)\n self.lons = np.array(self.lons)",
"def translateCumuPosIntoChrPos(self, top_loci_in_cumu_pos, cumuSpan2ChrSpanRBDict=None, compareIns=None):\n\t\ttop_loci = []\n\t\tcompareIns = CNVCompareBySmallOverlapRatio(min_reciprocal_overlap=0.0000001)\n\t\tfor span in top_loci_in_cumu_pos:\n\t\t\tcumu_start, cumu_stop = span[:2]\n\t\t\tsegmentKey = CNVSegmentBinarySearchTreeKey(chromosome=0, \\\n\t\t\t\t\t\t\tspan_ls=[cumu_start, cumu_stop], \\\n\t\t\t\t\t\t\tmin_reciprocal_overlap=0.00000000000001,)\n\t\t\t\t\t\t\t#2010-8-17 overlapping keys are regarded as separate instances as long as they are identical.\n\t\t\tnode_ls = []\n\t\t\tcumuSpan2ChrSpanRBDict.findNodes(segmentKey, node_ls=node_ls, compareIns=compareIns)\n\t\t\tif len(node_ls)==0:\n\t\t\t\tsys.stderr.write(\"(%s, %s) not found in cumuSpan2ChrSpanRBDict.\\n\"%(cumu_start, cumu_stop))\n\t\t\tfor node in node_ls:\n\t\t\t\tchr, node_chr_start, node_chr_stop = node.value[:3]\n\t\t\t\toverlapData = get_overlap_ratio(segmentKey.span_ls, [node.key.start, node.key.stop])\n\t\t\t\toverlapFraction1 = overlapData.overlapFraction1\n\t\t\t\toverlapFraction2 = overlapData.overlapFraction2\n\t\t\t\toverlap_length = overlapData.overlap_length\n\t\t\t\toverlap_start_pos = overlapData.overlap_start_pos\n\t\t\t\toverlap_stop_pos = overlapData.overlap_stop_pos\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tstart = overlap_start_pos - node.key.span_ls[0] + node_chr_start\t#overlap_start_pos is in cumu coordinates.\n\t\t\t\tstop = overlap_stop_pos - node.key.span_ls[0] + node_chr_start\n\t\t\t\tif stop>node_chr_stop:\t#truncate it. shouldn't happen though\n\t\t\t\t\tstop = node_chr_stop\n\t\t\t\ttop_loci.append([chr, start, stop])\n\t\treturn top_loci",
"def chromosome_lengths(self):\n chr_lens = {}\n for r in self.regions(lazy=True):\n if chr_lens.get(r.chromosome) is None:\n chr_lens[r.chromosome] = r.end\n continue\n if r.end > chr_lens[r.chromosome]:\n chr_lens[r.chromosome] = r.end\n return chr_lens",
"def getRange(self, chr, start, end, bins=2000, zoomlvl=-1, metric=\"AVG\", respType=\"DataFrame\"):\n try:\n iter = self.file.fetch(chr, start, end)\n # result = []\n # for x in iter:\n # returnBin = (x.reference_name, x.reference_start, x.reference_end, x.query_alignment_sequence, x.query_sequence)\n # result.append(returnBin)\n\n # if self.columns is None:\n # self.columns = [\"chr\", \"start\", \"end\", \"query_alignment_sequence\", \"query_sequence\"]\n\n # if respType is \"DataFrame\":\n # result = toDataFrame(result, self.columns)\n\n (result, _) = get_range_helper(self.toDF, self.get_bin,\n self.get_col_names, chr, start, end, iter, self.columns, respType)\n\n return result, None\n except ValueError as e:\n raise Exception(\"didn't find chromId with the given name\")",
"def gather_strand_by_geneID_dict(genome_gtf):\n strand_by_geneID_dict = {}\n with open(genome_gtf) as f: \n for line in f: \n current_line = line.split('\\t')\n if current_line[2] == \"CDS\":\n current_orf = current_line[8].split(';')[2].split()[1].strip('\\\"')\n current_strand = current_line[6]\n strand_by_geneID_dict[current_orf] = current_strand\n return strand_by_geneID_dict",
"def merge_overlapping_on_chrm_and_strand(intervals, coverage):\n sorted_by_lower_bound = sorted(intervals, key=lambda x: x.left)\n merged = []\n for higher in sorted_by_lower_bound:\n if not merged:\n merged.append(higher)\n else:\n lower = merged[-1]\n # test for intersection between lower and higher:\n # we know via sorting that lower[0] <= higher[0]\n if higher.left <= lower.right:\n upper_bound = int(max(lower.right, higher.right))\n new_peak = peak(lower.chrm, lower.left, upper_bound, lower.strand)\n new_peak.height = 0\n window = HTSeq.GenomicInterval(lower.chrm, lower.left, upper_bound, lower.strand)\n wincvg = np.fromiter(coverage[window], dtype='i')\n new_peak.height = int(max(wincvg))\n merged[-1] = new_peak # replace by merged interval\n else:\n merged.append(higher)\n return merged",
"def parse_csvs(self, file_list):\n # csv headers\n STRAIN = 'strain'\n CHROMOSOME = 'chrom'\n START = 'start'\n END = 'end'\n SUBSPECIES = 'subspecies'\n COLOR = 'color'\n COLOR_TO_NAME = ['mus', 'cas', 'dom']\n strains = {}\n for filename in file_list:\n with open(os.path.join(self.path, filename)) as csvfile:\n reader = csv.DictReader(csvfile)\n if SUBSPECIES in reader.fieldnames:\n def subspec_int(r):\n return subspecies.to_int(r[SUBSPECIES])\n else:\n def subspec_int(r):\n return subspecies.to_int(COLOR_TO_NAME[np.argmax([int(v) for v in r[COLOR].split(' ')])])\n for row in reader:\n chromosomes = strains.setdefault(row[STRAIN], OrderedDict())\n intervals = chromosomes.setdefault(CHROMO_TO_INT[row[CHROMOSOME]], [])\n lastEnd = 0 if not intervals else intervals[-1][1]\n # add null interval if there is a gap between intervals with assigned subspecies\n if not int(row[START]) - 1 <= lastEnd <= int(row[START]) + 1:\n intervals.append((subspecies.UNKNOWN, int(row[START])))\n intervals.append((subspec_int(row), int(row[END])))\n # add null interval to end of each chromosome\n for chromosomes in strains.itervalues():\n for chromosome, intervals in chromosomes.iteritems():\n if intervals[-1] < self.sizes[chromosome - 1]:\n intervals.append((subspecies.UNKNOWN, self.sizes[chromosome - 1]))\n return strains",
"def sample_intervals(self, num_intervals):\n chrom_sample = self.chrom_table.sample(\n n=num_intervals,\n replace=True,\n weights=self.chrom_table[\"weight\"],\n random_state=self.rng\n )\n chrom_sample[\"start\"] = (\n self.rng.rand(num_intervals) * chrom_sample[\"max_size\"]\n ).astype(int)\n chrom_sample[\"end\"] = chrom_sample[\"start\"] + self.sample_length\n\n return chrom_sample[[\"chrom\", \"start\", \"end\"]].values.astype(object)",
"def cytoband_by_chrom(self, build=\"37\"):\n if \"38\" in str(build):\n build = \"38\"\n else:\n build = \"37\"\n\n match = {\"$match\": {\"build\": build}}\n group = {\n \"$group\": {\n \"_id\": \"$chrom\",\n \"cytobands\": {\n \"$push\": {\n \"band\": \"$band\",\n \"chrom\": \"$chrom\",\n \"start\": \"$start\",\n \"stop\": \"$stop\",\n }\n },\n }\n }\n sort = {\"$sort\": {\"start\": pymongo.ASCENDING}}\n\n result = self.cytoband_collection.aggregate([match, group, sort])\n cytobands_by_chrom = {each.pop(\"_id\"): each for each in result}\n return cytobands_by_chrom",
"def get_recordrange(self):\r\n if self.version >= 10.1:\r\n querystr = \"\"\"?where=&outFields=*&returnGeometry=false&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=[{%0D%0A++++\"statisticType\"%3A+\"count\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidcount\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"min\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmin\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"max\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmax\"%0D%0A++}]&returnZ=false&returnM=false&returnDistinctValues=false&f=pjson\"\"\"\r\n req = requests.get(self.endpointurl + querystr)\r\n self.recordinfo = req.json()[\"features\"][0][\"attributes\"]\r\n\r\n elif self.version < 10.1:\r\n querystr = \"\"\"?text=&geometry=&geometryType=esriGeometryPoint&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&objectIds=&where=objectid+>+-1&time=&returnCountOnly=true&returnIdsOnly=false&returnGeometry=false&maxAllowableOffset=&outSR=&outFields=&f=pjson\"\"\"\r\n req = requests.get(self.endpontquerystr + qs)\r\n self.recordinfo = {\"oidmin\": 0, \"oidmax\": req.json()[\"count\"]}\r\n\r\n [\r\n self.iterlist.append([x, x + 999])\r\n for x in range(\r\n self.recordinfo[\"oidmin\"]\r\n if self.recordinfo[\"oidmin\"] != self.recordinfo[\"oidmax\"]\r\n else 1 - self.recordinfo[\"oidmin\"],\r\n self.recordinfo[\"oidmax\"],\r\n 1000,\r\n )\r\n ]",
"def getTranslationDataStructureForBackgroundLoci(self, db_250k, cnv_method_id=None, min_MAF=0.1):\n\t\tsys.stderr.write(\"Getting translation structures between (chr, start, stop) and (cumu_start, cumu_stop) for cnv method %s ...\"%\\\n\t\t\t\t\t\tcnv_method_id)\n\t\tTableClass = Stock_250kDB.CNV\n\t\tquery = TableClass.query.filter_by(cnv_method_id=cnv_method_id).order_by(TableClass.chromosome).order_by(TableClass.start)\n\t\t\n\t\tchrSpan2cumuStartRBDict = RBDict()\n\t\tcumuSpan2ChrSpanRBDict = RBDict()\n\t\t\n\t\tcumu_start = 0\n\t\tcounter = 0\n\t\treal_counter = 0\n\t\tfor row in query:\n\t\t\tcounter += 1\n\t\t\tmaf = min(row.frequency, 1-row.frequency)\n\t\t\tif maf<=min_MAF:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\treal_counter += 1\n\t\t\tchrSpanKey = CNVSegmentBinarySearchTreeKey(chromosome=row.chromosome, \\\n\t\t\t\t\t\t\tspan_ls=[row.start, row.stop], \\\n\t\t\t\t\t\t\tmin_reciprocal_overlap=0.00000000000001,)\n\t\t\t\t\t#2010-8-17 overlapping keys are regarded as separate instances as long as they are not identical.\n\t\t\tchrSpan2cumuStartRBDict[chrSpanKey] = cumu_start\t#cumu_start is 0-based\n\t\t\t\n\t\t\tsize = row.stop-row.start+1\n\t\t\tspan_ls=[cumu_start+1, cumu_start+size]\n\t\t\tsegmentKey = CNVSegmentBinarySearchTreeKey(chromosome=0, \\\n\t\t\t\t\t\t\tspan_ls=span_ls, \\\n\t\t\t\t\t\t\tmin_reciprocal_overlap=0.00000000000001,)\n\t\t\t\t\t#2010-8-17 overlapping keys are regarded as separate instances as long as they are not identical.\n\t\t\tif segmentKey not in cumuSpan2ChrSpanRBDict:\n\t\t\t\tcumuSpan2ChrSpanRBDict[segmentKey] = (row.chromosome, row.start, row.stop)\n\t\t\telse:\n\t\t\t\tsys.stderr.write(\"Error: %s of chr %s is already in cumuSpan2ChrSpanRBDict.\\n\"%(segmentKey, row.chromosome))\n\t\t\t\n\t\t\tcumu_start += size\n\t\tsys.stderr.write(\"%s out of %s CNVs are included. Done.\\n\"%(real_counter, counter))\n\t\treturn PassingData(cumuSpan2ChrSpanRBDict=cumuSpan2ChrSpanRBDict, chrSpan2cumuStartRBDict=chrSpan2cumuStartRBDict)"
] | [
"0.60520214",
"0.5973717",
"0.58647096",
"0.5702146",
"0.55897397",
"0.54622334",
"0.5423216",
"0.5373043",
"0.52435833",
"0.5237961",
"0.5187223",
"0.5170602",
"0.5147745",
"0.5144643",
"0.5126892",
"0.50961936",
"0.5087652",
"0.50801325",
"0.5040272",
"0.5039929",
"0.5024084",
"0.50072527",
"0.50001335",
"0.49845147",
"0.49681926",
"0.49620727",
"0.4909403",
"0.48865852",
"0.48791486",
"0.48647836"
] | 0.71107197 | 0 |
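A recurring detail in the snippets above is the shift between biologists' 1-based, inclusive coordinates and Python's 0-based, end-exclusive slices (get_reference_seq subtracts 1 from start before slicing). A minimal sketch of that conversion on its own; slice_1based is a hypothetical helper name, not taken from any snippet here:

def slice_1based(seq, start, end):
    # start/end are 1-based and inclusive, as in most genome browsers;
    # Python slicing is 0-based and end-exclusive, hence start - 1.
    return seq[start - 1:end]

assert slice_1based("ACGTACGT", 1, 3) == "ACG"
assert slice_1based("ACGTACGT", 4, 4) == "T"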
Checks to see if the interval is within a host RefSeq gene (does not test strand!). If not, returns False. If yes, returns a list of dictionaries, one for each host RefSeq gene. Keys are consistent with the field names of the UCSC table refGene. | def hostRefSeq(chr,start,end,strand):
cursor=gbdbConnect()
selSQL="SELECT * from refGene WHERE chrom='%s' AND txStart<='%d' AND txEnd>='%d'" % (chr,int(start),int(end))
cursor.execute(selSQL)
rows=cursor.fetchall()
results=[]
if cursor.rowcount==0:
return False
else:
for row in rows:
results.append(row)
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_refrange(self):\n if np.all(np.isnan(self.par)):\n print( 'Run params() before')\n return\n if hasattr(self,'refranges'):\n return self.refranges\n ice_r = [r for r in xrange(len(self.ref)) if ~ np.isnan(self.par[1,r,10,0])]\n liq_r = [r for r in xrange(len(self.ref)) if ~ np.isnan(self.par[0,r,10,0])]\n return (liq_r,ice_r)",
"def fetchRefSeqIntervalsIndexed(genome='hg18',proteinCodingOnly=False,verbose=False):\n cursor=gbdbConnect(gbdbname=genome)\n select=\"SELECT * FROM refGene\"\n if verbose:\n sys.stderr.write(\"Fetching RefSeq Sequences...\\n\")\n cursor.execute(select)\n rows=cursor.fetchall()\n output={}\n for chr in genomelib.chr_names:\n output[chr]={}\n output[chr]['+']=[]\n output[chr]['-']=[]\n if verbose:\n sys.stderr.write(\"Creating index by chr and strand...\\n\")\n \n for row in rows:\n if proteinCodingOnly and not row['name'].startswith('NM'):\n continue\n try:\n exonStarts = map(int,row['exonStarts'].rstrip().split(\",\")[:-1])\n exonEnds = map(int,row['exonEnds'].rstrip().split(\",\")[:-1])\n except:\n print \"\\t\".join([\"%s:%s\" % (k,v) for k,v in row.iteritems()])\n start = int(row['txStart'])\n exonOffsets = [x-start for x in exonStarts]\n exonLengths = []\n for i in xrange(len(exonStarts)):\n exonLengths.append(exonEnds[i]-exonStarts[i]+1)\n if row['chrom'] in genomelib.chr_names:\n output[row['chrom']][row['strand']].append(intervallib.SplicedInterval(row['chrom'],row['txStart'],row['txEnd'],row['strand'],\",\".join([str(x) for x in exonLengths]),\",\".join([str(x) for x in exonOffsets]),name=row['name2']))\n \n #Sort \n if verbose:\n sys.stderr.write(\"Sorting:\\n\")\n tstart = time.time()\n for key in output.keys():\n if verbose:\n sys.stderr.write(\"\\t%s\\t\" % key)\n output[key]['+'].sort()\n output[key]['-'].sort()\n tend = time.time()\n if verbose:\n sys.stderr.write('%0.2f sec\\n' % (tend-tstart))\n tstart = time.time()\n return output",
"def ships_in_range(self):\n\n query_string = \"SELECT * from ships_in_range;\"\n\n # Perform query\n self.conn_cur.execute(query_string)\n results = self.conn_cur.fetchall()\n\n # Build dictionary\n ranges = {}\n for row in results:\n ranges[row[0]] = row[1]\n\n return ranges",
"def ranges(self, keys:list)->list:\n if not isinstance(keys, list):\n keys = [keys]\n ranges = {}\n for key in keys:\n ranges[key] = [None, None]\n for list_item in self.list:\n for key in keys:\n if ranges[key][0] is None:\n ranges[key][0] = list_item[key]\n ranges[key][1] = list_item[key]\n elif ranges[key][0] > list_item[key]:\n ranges[key][0] = list_item[key]\n elif ranges[key][1] < list_item[key]:\n ranges[key][1] = list_item[key]\n return ranges",
"def ValidClusterRanges(self):\n for cluster_range in self.cluster_ranges:\n the_range = cluster_range.split(\"-\")\n print(f\"Checking that range {the_range} falls within our data area\")\n try:\n if int(the_range[0]) < self.low_data_cluster or int(the_range[1]) > self.high_data_cluster:\n print(f\"False. {the_range[0]} or {the_range[1]} is outside of our data area\")\n return False\n except TypeError as t_err:\n print(f\"Error. Range does not appear to be an int\")\n return False\n return True",
"def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic",
"def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic",
"def fetchRefSeqByChrom(RefSeqBed=\"/fg/compbio-t/lgoff/magda/references/human/transcriptome/hg18/hg18_RefSeq.bed\"):\n res = {}\n iter = parseBed(RefSeqBed)\n for i in iter:\n res.setdefault(i.chr,{})\n res[i.chr].setdefault(i.strand,[])\n res[i.chr][i.strand].append(i)\n return res",
"def validate_rangeIncludes_field(self, rangeincludes_value):\n rangeincludes_value = dict2list(rangeincludes_value)\n for record in rangeincludes_value:\n if record[\"@id\"] not in self.all_classes:\n raise KeyError('Value of rangeincludes: {} is not defined in the schema.'.format(record[\"@id\"]))",
"def range_match(verifield, ranges):\n return verifield[0] >= ranges[0][0] and verifield[0] <= ranges[0][1] and verifield[1] >= ranges[1][0] and verifield[1] <= ranges[1][1]",
"def build_interval_trees(reference_gtf):\n\n if isinstance(reference_gtf, (str, Path)):\n reference_gtf = GtfFile(reference_gtf)\n\n # Only select gene features for now.\n genes = reference_gtf.fetch(filters={'feature': 'gene'})\n\n # Note, below code assumes that genes are ordered by contig.\n\n trees = {}\n for contig, grp in itertools.groupby(genes, lambda r: r['contig']):\n # Build a tree for each individual chromosome.\n intervals = ((g['start'], g['end'], dict(g)) for g in grp\n if g['end'] > g['start']) # Avoid null intervals.\n trees[contig] = IntervalTree.from_tuples(intervals)\n\n return trees",
"def check_intervals(what):\n\n intervals = []\n result = []\n\n for interval in what:\n for seen_interval in intervals:\n if ((interval[1] > seen_interval[0]) and (interval[0] <= seen_interval[0])) or \\\n ((interval[0] < seen_interval[1]) and (interval[0] >= seen_interval[0])):\n result.append(interval)\n break\n\n intervals.append(interval)\n\n return result",
"def intervalLookup(intervals,key = \"ID\"):\n lookup = {}\n \n for interval in intervals:\n ikey = None\n \n if key in interval.data:\n ikey = interval.data[key]\n else:\n ikey = key(interval)\n \n if ikey is not None:\n assert ikey not in lookup, Exception(\"duplicate key '%s'\" % ikey)\n lookup[ikey] = interval\n \n return lookup",
"def _parse_requantization_ranges(self):\n res = {}\n\n print_suffix = \"__print__\"\n lines = self._get_valid_log()\n temp_min = {}\n temp_max = {}\n pattern_def = r\"{};{}:\\[\\-?\\d+\\.?\\d*e?-?\\+?\\d*\\]\".format(print_suffix, self.postfix)\n for i in lines:\n if not re.search(pattern_def, i):\n continue\n\n max_line_data = i.split(print_suffix + \";\" + self.postfix)[-1]\n min_value = max_line_data.split('][')[0].split('[')[1]\n max_value = max_line_data.split('][')[1].split(']')[0]\n name = i.split(';')[1].strip()[:-len(print_suffix)]\n if name not in temp_min:\n temp_min[name] = []\n if name not in temp_max:\n temp_max[name] = []\n\n temp_min[name].append(float(min_value))\n temp_max[name].append(float(max_value))\n\n for key in temp_min:\n target_min_index = int(np.ceil(len(temp_min[key]) * (1 - self.threshold)))\n\n if key not in res:\n res[key] = []\n\n if target_min_index > len(temp_min[key]) - 1:\n target_min_index = len(temp_min[key]) - 1\n res[key].append(sorted(temp_min[key])[target_min_index])\n\n for key in temp_max:\n target_max_index = int(np.floor(len(temp_max[key]) * self.threshold)) - 1\n\n if target_max_index > len(temp_max[key]) - 1:\n target_max_index = len(temp_max[key]) - 1\n\n res[key].append(sorted(temp_max[key])[target_max_index])\n\n if self.tensor_data:\n for k, v in self.tensor_data.items():\n if k in res:\n self.logger.debug(\"Update node {} min to {}, max to {}.\".format(k, v[2], v[3]))\n res[k] = [v[2], v[3]]\n return res",
"def overlaps(interval,intervals):\n return [x for x in intervals if interval.overlaps(x)]",
"def is_in_hotspot(self):\r\n in_hotspot = False\r\n hotspots = parser.parse_hotspot_bed()\r\n \r\n if hotspots.get(self.chrom): \r\n chrom_hotspots = hotspots[self.chrom]\r\n \r\n for interval in chrom_hotspots: \r\n if interval[0] <= self.pos <= interval[1]:\r\n in_hotspot = True\r\n break\r\n \r\n return in_hotspot",
"def validRange(line):\n line_col = str.split(line)\n chrom = line_col[0]\n pos = line_col[1]\n# any(lower <= postcode <= upper for (lower, upper) in [(1000, 2249), (2555, 2574), ...])\n if any(float(low) <= float(pos) <= float(high) for (low,high) in TE_ranges[chrom]):\n return False\n return True",
"def contained(query, intervalset):\n for i in intervalset:\n if query == i:\n continue\n if query[0] <= i[0] and i[1] <= query[1] and i[1]-i[0] < query[1]-query[0]:\n return True\n return False",
"def is_work_available(start_dt, end_dt, intervals):\n def find_start_index():\n \"\"\" find the highest index of intervals for which the start_date (element [0]) is before (or at) start_dt\n \"\"\"\n def recursive_find_index(lower_bound, upper_bound):\n if upper_bound - lower_bound <= 1:\n if intervals[upper_bound][0] <= start_dt:\n return upper_bound\n return lower_bound\n index = (upper_bound + lower_bound) // 2\n if intervals[index][0] <= start_dt:\n return recursive_find_index(index, upper_bound)\n else:\n return recursive_find_index(lower_bound, index)\n\n if start_dt <= intervals[0][0] - tolerance:\n return -1\n if end_dt >= intervals[-1][1] + tolerance:\n return -1\n return recursive_find_index(0, len(intervals) - 1)\n\n if not intervals:\n return False\n\n tolerance = timedelta(minutes=1)\n start_index = find_start_index()\n if start_index != -1:\n for index in range(start_index, len(intervals)):\n if intervals[index][1] >= end_dt - tolerance:\n return True\n if len(intervals) == index + 1 or intervals[index + 1][0] - intervals[index][1] > tolerance:\n return False\n return False",
"def covers_overlaps(self, bounds):\n bounds = tuple(float(b) for b in bounds)\n return self.numba_rtree.covers_overlaps(bounds)",
"def isOverlap(peak, ref_distance_map, ref_distance_indexmap):\n chromosome = peak[0]\n start = int(peak[1])\n end = int(peak[2])\n\n if chromosome not in ref_distance_indexmap:\n return False\n\n indexes = ref_distance_indexmap[chromosome]\n\n left_index = bisect(indexes, start)\n right_index = bisect(indexes, end)\n\n # the rational is if overlap, the distance is zero\n candidate_regions = set()\n\n potential_indexes = []\n\n left_index = left_index - 10 if left_index - 10 >= 0 else 0\n for index in indexes[left_index - 1: right_index+10]:\n potential_indexes.append(index)\n\n for feature_position in potential_indexes:\n candidate_regions = candidate_regions.union(ref_distance_map[chromosome][feature_position])\n\n for region in candidate_regions:\n if start <= region.start <= end:\n return True\n if start <= region.end <= end:\n return True\n if region.start <= start and end <= region.end:\n return True\n return False",
"def gene_coords_by_range(probes, chrom, start, end, ignore=params.IGNORE_GENE_NAMES):\n ignore += params.ANTITARGET_ALIASES\n # Tabulate the genes in the selected region\n genes = collections.OrderedDict()\n for row in probes.in_range(chrom, start, end):\n name = str(row.gene)\n if name in genes:\n genes[name][1] = row.end\n elif name not in ignore:\n genes[name] = [row.start, row.end]\n # Reorganize the data structure\n return {\n chrom: [(gstart, gend, name) for name, (gstart, gend) in list(genes.items())]\n }",
"def paired_interval_extend(uniq_fragment,fragment_cov,gtf_dic):\n out_dic = {}\n total_reads = 0\n for key in uniq_fragment.keys():\n chr_no = key[0]\n #print (frag_start,frag_end)\n frag_strand = key[3]\n interval_comp = uniq_fragment[key][0]\n complete_info = uniq_fragment[key][1]\n frag_cov = fragment_cov[key]\n total_reads += frag_cov\n geneNA = 'NA'\n geneType = 'NA'\n geneRegion = 'NA'\n flag = 0\n for trans in gtf_dic[(chr_no,frag_strand)]:\n frag_start,frag_end = key[1:3]\n # for trans in gtf_dic[('chr1','-')]:\n # if chr_no == 'chr1' and frag_strand == '-':\n if frag_start > trans[0] and frag_end < trans[1]:\n #print 'Hello!'\n # print (trans)\n geneNA = trans[4]\n geneType = trans[5]\n if geneType == 'protein_coding':\n CDS_start,CDS_end = trans[2:4]\n if frag_start >= CDS_start and frag_end <= CDS_end:\n geneRegion = 'CDS'\n elif frag_strand == '+':\n if frag_end <= CDS_start:\n geneRegion = '5UTR'\n elif frag_start < CDS_start and frag_end > CDS_start:\n geneRegion = '5UTR-CDS'\n elif frag_start < CDS_end and frag_end > CDS_end:\n geneRegion = 'CDS-3UTR'\n elif frag_start >= CDS_end:\n geneRegion = '3UTR'\n elif frag_strand == '-':\n if frag_end <= CDS_start:\n geneRegion = '3UTR'\n elif frag_start < CDS_start and frag_end > CDS_start:\n geneRegion = 'CDS-3UTR'\n elif frag_start < CDS_end and frag_end > CDS_end:\n geneRegion = '5UTR-CDS'\n elif frag_start >= CDS_end:\n geneRegion = '5UTR'\n else:\n geneRegion = 'Null'\n # print (frag_start,frag_end,CDS_start,CDS_end,geneNA,geneRegion)\n#------------------------------------------------------------------------------ intersect of fragments interval and exons interval\n frag_intersect = interval_comp & trans[-1]\n interval_comp_length = sum([interval_comp[a].upper- interval_comp[a].lower for a in range(0,len(interval_comp))])\n # print (interval_comp)\n # print (frag_intersect)\n#------------------------------------------------------------------------------ fragments located in introns\n if frag_intersect == P.empty(): \n flag = 1\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),'intron',str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n else:\n if complete_info == 'complete':\n flag = 3\n #print interval_comp\n#------------------------------------------------------------------------------ reduce alignment noise\n frag_intersect_length = sum([frag_intersect[a].upper-frag_intersect[a].lower for a in range(0,len(frag_intersect))])\n absolute_diff = abs(frag_intersect_length-interval_comp_length)\n if absolute_diff == 0:\n#------------------------------------------------------------------------------ \n start_region = []\n length_region = []\n for region in frag_intersect:\n start_region.append(str(int(region.lower - frag_start)))\n length_region.append(str(int(region.upper - region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_region)),\\\n ','.join(length_region),','.join(start_region),str(frag_cov),flag,complete_info))\n else:\n start_region = []\n length_region = []\n 
for region in interval_comp:\n start_region.append(str(int(region.lower - frag_start)))\n length_region.append(str(int(region.upper - region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),geneNA,geneType,\\\n frag_strand,str(frag_start),str(frag_end),'intron-containing',str(len(start_region)),\\\n ','.join(length_region),','.join(start_region),str(frag_cov),flag,complete_info))\n else:\n #print interval_comp\n #print frag_intersect\n#------------------------------------------------------------------------------ fragments boundaries located in exons\n #print frag_intersect[0][0],frag_start,frag_intersect[-1][1],frag_end\n #print abs_position\n # print (P.closedopen(frag_start,frag_end),trans[-1])\n interval_update = P.closedopen(frag_start,frag_end) & trans[-1]\n # print (interval_update)\n frag_trans_length = sum([interval_update[a].upper-interval_update[a].lower for a in range(0,len(interval_update))])\n absolute_diff = abs(frag_trans_length-interval_comp_length)\n #print absolute_diff\n #print geneRegion\n #print interval_comp\n #print abs_position\n if absolute_diff <= 300: #insert sequence length <=200nt\n #print frag_trans_length,interval_comp_length\n #print geneRegion\n flag = 2\n start_out = []\n length_out = []\n for interval_region in list(interval_update):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n else:\n # print (trans)\n flag = 1\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),'intron-containing',str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n if flag == 0:\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic[(chr_no,frag_start,frag_end,frag_strand)] = [(chr_no,str(frag_start),str(frag_end),'intergenic','intergenic',frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info)]\n print ('Total treated fragments: ' + str(total_reads))\n return out_dic",
"def test_interval_dict_structure(self):\n self.model = {}\n self.model['node_names'] = ['E1','E2','E3','E4','E5','E6']\n dummy_data=[1,2,3,5,6,7]\n # =============================================================================\n # Correct dict structure should be a nested dict with two layers, including\n # the interval name, the node names in that interval and then the index number\n # in the model['node_names] list.\n #\n # Nodes with values on the edge between two intervals should always be placed in\n # the interval with that value as the minimum.\n # =============================================================================\n correct_dict={'1.000 - 3.000': {'E1':0,'E2':1},\n '3.000 - 5.000': {'E3':2},\n '5.000 - 7.000': {'E4':3,'E5':4,'E6':5}}\n \n interval_results, interval_names = viswaternet.network.bin_parameter(self,dummy_data,self.model['node_names'],3)\n self.assertDictEqual(correct_dict,interval_results,\"bin_parameter is not producing correct dictionary structure.\")",
"def provider_range_lookup(self, record):\n pass",
"def fetchRefSeq(genome = 'hg18',lookupval = 'name'):\n cursor=gbdbConnect(gbdbname=genome)\n select=\"SELECT * FROM refGene\"\n cursor.execute(select)\n rows=cursor.fetchall()\n output={}\n for chr in genomelib.chr_names:\n output[chr]={}\n output[chr]['+']={}\n output[chr]['-']={}\n for row in rows:\n if row['chrom'] in genomelib.chr_names:\n output[row['chrom']][row['strand']][row[lookupval]]=row\n return output",
"def find_matching_segments(self):\n hyp_matched_segs = [TIntervalGroup() for i in range(len(self.hyp))]\n for gid_ref, match_ref in enumerate(self.ref):\n bg_ref = match_ref.bbox_group\n max_gid, max_area = -1, 0\n for gid_hyp, bg_hyp in enumerate(self.hyp.get_bbox_groups()):\n rx, ry = bg_ref.page_range(), bg_hyp.page_range()\n if ry[0] > rx[1]:\n break\n area = (bg_ref & bg_hyp)\n if area > max_area:\n max_gid, max_area = gid_hyp, area\n if max_gid != -1:\n hyp_matched_segs[max_gid].extend(match_ref.tinterval_group.copy())\n print('%d -> %d' % (gid_ref, max_gid))\n for seg in hyp_matched_segs:\n seg.reduce()\n return hyp_matched_segs",
"def get_recordrange(self):\r\n if self.version >= 10.1:\r\n querystr = \"\"\"?where=&outFields=*&returnGeometry=false&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=[{%0D%0A++++\"statisticType\"%3A+\"count\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidcount\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"min\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmin\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"max\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmax\"%0D%0A++}]&returnZ=false&returnM=false&returnDistinctValues=false&f=pjson\"\"\"\r\n req = requests.get(self.endpointurl + querystr)\r\n self.recordinfo = req.json()[\"features\"][0][\"attributes\"]\r\n\r\n elif self.version < 10.1:\r\n querystr = \"\"\"?text=&geometry=&geometryType=esriGeometryPoint&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&objectIds=&where=objectid+>+-1&time=&returnCountOnly=true&returnIdsOnly=false&returnGeometry=false&maxAllowableOffset=&outSR=&outFields=&f=pjson\"\"\"\r\n req = requests.get(self.endpontquerystr + qs)\r\n self.recordinfo = {\"oidmin\": 0, \"oidmax\": req.json()[\"count\"]}\r\n\r\n [\r\n self.iterlist.append([x, x + 999])\r\n for x in range(\r\n self.recordinfo[\"oidmin\"]\r\n if self.recordinfo[\"oidmin\"] != self.recordinfo[\"oidmax\"]\r\n else 1 - self.recordinfo[\"oidmin\"],\r\n self.recordinfo[\"oidmax\"],\r\n 1000,\r\n )\r\n ]",
"def is_interval(self):\n return len(self.interval_list) > 0",
"def is_interval(self):\n return True"
] | [
"0.5347442",
"0.51855284",
"0.5100473",
"0.50937337",
"0.5092169",
"0.5061189",
"0.5061189",
"0.5053239",
"0.50323635",
"0.50276566",
"0.5026319",
"0.50240004",
"0.49963844",
"0.4995861",
"0.49941018",
"0.49801216",
"0.4969835",
"0.4949664",
"0.49097306",
"0.48739707",
"0.48597744",
"0.48492002",
"0.48460683",
"0.48142284",
"0.47932383",
"0.47853026",
"0.4765363",
"0.47529936",
"0.47422227",
"0.4735424"
] | 0.5767976 | 0 |
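The containment test that hostRefSeq runs in SQL (txStart <= start AND txEnd >= end on the same chromosome) can be sketched without a database connection. A minimal sketch, assuming gene records are plain dicts keyed like the refGene fields; the records below are made up for illustration:

def host_genes(genes, chrom, start, end):
    # A gene "hosts" the interval when it spans it completely on that chromosome;
    # strand is deliberately ignored here, matching the query above.
    return [g for g in genes
            if g["chrom"] == chrom
            and g["txStart"] <= int(start)
            and g["txEnd"] >= int(end)]

genes = [
    {"name": "NM_0001", "chrom": "chr1", "txStart": 100, "txEnd": 500},
    {"name": "NM_0002", "chrom": "chr1", "txStart": 600, "txEnd": 900},
]
assert [g["name"] for g in host_genes(genes, "chr1", 150, 400)] == ["NM_0001"]
assert host_genes(genes, "chr1", 550, 560) == []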
Checks to see if the interval is entirely within a known wgRNA gene (including miRNA). Strand is considered! If there is no host wgRNA gene, returns False. If yes, returns a list of dictionaries, one for each host wgRNA gene. Keys are consistent with the field names of the UCSC table wgRNA. | def testwgRNA(chr,start,end,strand):
cursor=gbdbConnect()
selSQL="SELECT * from wgRna WHERE chrom='%s' AND strand='%s' AND chromStart<='%d' AND chromEnd>='%d'" % (chr,strand,int(start),int(end))
cursor.execute(selSQL)
rows=cursor.fetchall()
results=[]
if cursor.rowcount==0:
return False
else:
for row in rows:
results.append(row)
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_intervals(what):\n\n intervals = []\n result = []\n\n for interval in what:\n for seen_interval in intervals:\n if ((interval[1] > seen_interval[0]) and (interval[0] <= seen_interval[0])) or \\\n ((interval[0] < seen_interval[1]) and (interval[0] >= seen_interval[0])):\n result.append(interval)\n break\n\n intervals.append(interval)\n\n return result",
"def contained(query, intervalset):\n for i in intervalset:\n if query == i:\n continue\n if query[0] <= i[0] and i[1] <= query[1] and i[1]-i[0] < query[1]-query[0]:\n return True\n return False",
"def test_interval_dict_structure(self):\n self.model = {}\n self.model['node_names'] = ['E1','E2','E3','E4','E5','E6']\n dummy_data=[1,2,3,5,6,7]\n # =============================================================================\n # Correct dict structure should be a nested dict with two layers, including\n # the interval name, the node names in that interval and then the index number\n # in the model['node_names] list.\n #\n # Nodes with values on the edge between two intervals should always be placed in\n # the interval with that value as the minimum.\n # =============================================================================\n correct_dict={'1.000 - 3.000': {'E1':0,'E2':1},\n '3.000 - 5.000': {'E3':2},\n '5.000 - 7.000': {'E4':3,'E5':4,'E6':5}}\n \n interval_results, interval_names = viswaternet.network.bin_parameter(self,dummy_data,self.model['node_names'],3)\n self.assertDictEqual(correct_dict,interval_results,\"bin_parameter is not producing correct dictionary structure.\")",
"def stellar_parameter_range(database, wg=None):\n\n if wg is None:\n nodes = database.retrieve(\"SELECT id, wg, name FROM nodes\")\n else:\n nodes = database.retrieve(\"SELECT id, wg, name FROM nodes WHERE wg = %s\",\n (utils.wg_as_int(wg), ))\n\n rows = []\n for node_id, node_wg, node_name in nodes:\n results = database.retrieve_table(\n \"\"\"SELECT teff, e_teff, logg, e_logg, mh, e_mh, xi, e_xi\n FROM results WHERE node_id = %s\"\"\", (node_id, ))\n\n name = \"WG{}/{}\".format(node_wg, node_name) if wg is None else node_name\n if results is None or len(results) == 0:\n rows.append([name] + [np.nan] * 16)\n continue\n\n row = [name]\n for column in (\"teff\", \"logg\", \"mh\", \"xi\"):\n for column in [column, \"e_{}\".format(column)]:\n row.extend([\n np.nanmin(results[column]),\n np.nanmax(results[column])\n ])\n rows.append(row)\n\n return Table(rows=rows, names=(\"Name\", \n \"Min. TEFF\", \"Max. TEFF\", \"Min. E_TEFF\", \"Max. E_TEFF\",\n \"Min. LOGG\", \"Max. LOGG\", \"Min. E_LOGG\", \"Max. E_LOGG\",\n \"Min. MH\", \"Max. MH\", \"Min. E_MH\", \"Max. E_MH\",\n \"Min. XI\", \"Max. XI\", \"Min. E_XI\", \"Max. E_XI\"))",
"def _validateregion(self, bounds):\n bounds.sort()\n for region in self.regions:\n if region['min'] <= bounds[0] and region['max'] >= bounds[1]:\n return None\n elif region['min'] >= bounds[0] and region['max'] <= bounds[1]:\n i = self.regions.index(region)\n del self.regions[i]\n self.regions.append({'min': bounds[0], 'max': bounds[1], \n 'group': group})\n elif region['min'] <= bounds[0] <= region['max'] and \\\n region['max'] < bounds[1]:\n bounds[0] = region['max'] + 1\n elif region['min'] <= bounds[1] <= region['max'] and \\\n region['min'] > bounds[0]:\n bounds[1] = region['min'] - 1\n return bounds",
"def is_work_available(start_dt, end_dt, intervals):\n def find_start_index():\n \"\"\" find the highest index of intervals for which the start_date (element [0]) is before (or at) start_dt\n \"\"\"\n def recursive_find_index(lower_bound, upper_bound):\n if upper_bound - lower_bound <= 1:\n if intervals[upper_bound][0] <= start_dt:\n return upper_bound\n return lower_bound\n index = (upper_bound + lower_bound) // 2\n if intervals[index][0] <= start_dt:\n return recursive_find_index(index, upper_bound)\n else:\n return recursive_find_index(lower_bound, index)\n\n if start_dt <= intervals[0][0] - tolerance:\n return -1\n if end_dt >= intervals[-1][1] + tolerance:\n return -1\n return recursive_find_index(0, len(intervals) - 1)\n\n if not intervals:\n return False\n\n tolerance = timedelta(minutes=1)\n start_index = find_start_index()\n if start_index != -1:\n for index in range(start_index, len(intervals)):\n if intervals[index][1] >= end_dt - tolerance:\n return True\n if len(intervals) == index + 1 or intervals[index + 1][0] - intervals[index][1] > tolerance:\n return False\n return False",
"def _autobounds(self):\n bounds = {}\n\n def check(prop, compare, extreme, val):\n opp = min if compare is max else max\n bounds.setdefault(prop, val)\n bounds[prop] = opp(compare(bounds[prop], val), extreme)\n\n def bound_check(lat_lon):\n lat, lon = lat_lon\n check('max_lat', max, 90, lat)\n check('min_lat', min, -90, lat)\n check('max_lon', max, 180, lon)\n check('min_lon', min, -180, lon)\n\n lat_lons = [lat_lon for feature in self._features.values() for\n lat_lon in feature.lat_lons]\n if not lat_lons:\n lat_lons.append(self._default_lat_lon)\n for lat_lon in lat_lons:\n bound_check(lat_lon)\n\n return bounds",
"def covers_overlaps(self, bounds):\n bounds = tuple(float(b) for b in bounds)\n return self.numba_rtree.covers_overlaps(bounds)",
"def in_geo_limits(args: argparse.Namespace, track_data: dict) -> bool:\n return (track_data['boundaries']['north'] <= args.north_lim and\n track_data['boundaries']['south'] >= args.south_lim and\n track_data['boundaries']['east'] <= args.east_lim and\n track_data['boundaries']['west'] >= args.west_lim)",
"def check_interval_bounds(begin, end):\n if begin.get_midpoint() >= end.get_midpoint():\n return False\n\n if begin.get_radius() is not None and end.get_radius() is not None:\n if begin.get_midpoint() - begin.get_radius() > \\\n end.get_midpoint() - end.get_radius():\n return False\n\n return True",
"def get_bounds(shakefile, parameter='pga', threshold=2):\n from mapio.shake import ShakeGrid\n\n shakemap = ShakeGrid.load(shakefile, adjust='res')\n if parameter == 'pga':\n vals = shakemap.getLayer('pga')\n elif parameter == 'pgv':\n vals = shakemap.getLayer('pgv')\n else:\n raise Exception('parameter not valid')\n xmin, xmax, ymin, ymax = vals.getBounds()\n lons = np.linspace(xmin, xmax, vals.getGeoDict().nx)\n lats = np.linspace(ymax, ymin, vals.getGeoDict().ny) # backwards so it plots right\n row, col = np.where(vals.getData() > float(threshold))\n lonmin = lons[col].min()\n lonmax = lons[col].max()\n latmin = lats[row].min()\n latmax = lats[row].max()\n\n boundaries1 = {'dx': 100, 'dy': 100., 'nx': 100., 'ny': 100} # dummy fillers, only really care about bounds\n if xmin < lonmin:\n boundaries1['xmin'] = lonmin\n else:\n boundaries1['xmin'] = xmin\n if xmax > lonmax:\n boundaries1['xmax'] = lonmax\n else:\n boundaries1['xmax'] = xmax\n if ymin < latmin:\n boundaries1['ymin'] = latmin\n else:\n boundaries1['ymin'] = ymin\n if ymax > latmax:\n boundaries1['ymax'] = latmax\n else:\n boundaries1['ymax'] = ymax\n\n return boundaries1",
"def get_bounds(shakefile, parameter='pga', threshold=2.0):\n shakemap = ShakeGrid.load(shakefile, adjust='res')\n if parameter == 'pga':\n vals = shakemap.getLayer('pga')\n elif parameter == 'pgv':\n vals = shakemap.getLayer('pgv')\n else:\n raise Exception('parameter not valid')\n xmin, xmax, ymin, ymax = vals.getBounds()\n lons = np.linspace(xmin, xmax, vals.getGeoDict().nx)\n lats = np.linspace(ymax, ymin, vals.getGeoDict().ny)\n row, col = np.where(vals.getData() > float(threshold))\n lonmin = lons[col].min()\n lonmax = lons[col].max()\n latmin = lats[row].min()\n latmax = lats[row].max()\n\n # dummy fillers, only really care about bounds\n boundaries1 = {'dx': 100, 'dy': 100., 'nx': 100., 'ny': 100}\n\n if xmin < lonmin:\n boundaries1['xmin'] = lonmin\n else:\n boundaries1['xmin'] = xmin\n if xmax > lonmax:\n boundaries1['xmax'] = lonmax\n else:\n boundaries1['xmax'] = xmax\n if ymin < latmin:\n boundaries1['ymin'] = latmin\n else:\n boundaries1['ymin'] = ymin\n if ymax > latmax:\n boundaries1['ymax'] = latmax\n else:\n boundaries1['ymax'] = ymax\n\n return boundaries1",
"def validRange(line):\n line_col = str.split(line)\n chrom = line_col[0]\n pos = line_col[1]\n# any(lower <= postcode <= upper for (lower, upper) in [(1000, 2249), (2555, 2574), ...])\n if any(float(low) <= float(pos) <= float(high) for (low,high) in TE_ranges[chrom]):\n return False\n return True",
"def _in_interval(value, low, up):\n if low <= value <= up:\n return True\n else:\n return False",
"def paired_interval_extend(uniq_fragment,fragment_cov,gtf_dic):\n out_dic = {}\n total_reads = 0\n for key in uniq_fragment.keys():\n chr_no = key[0]\n #print (frag_start,frag_end)\n frag_strand = key[3]\n interval_comp = uniq_fragment[key][0]\n complete_info = uniq_fragment[key][1]\n frag_cov = fragment_cov[key]\n total_reads += frag_cov\n geneNA = 'NA'\n geneType = 'NA'\n geneRegion = 'NA'\n flag = 0\n for trans in gtf_dic[(chr_no,frag_strand)]:\n frag_start,frag_end = key[1:3]\n # for trans in gtf_dic[('chr1','-')]:\n # if chr_no == 'chr1' and frag_strand == '-':\n if frag_start > trans[0] and frag_end < trans[1]:\n #print 'Hello!'\n # print (trans)\n geneNA = trans[4]\n geneType = trans[5]\n if geneType == 'protein_coding':\n CDS_start,CDS_end = trans[2:4]\n if frag_start >= CDS_start and frag_end <= CDS_end:\n geneRegion = 'CDS'\n elif frag_strand == '+':\n if frag_end <= CDS_start:\n geneRegion = '5UTR'\n elif frag_start < CDS_start and frag_end > CDS_start:\n geneRegion = '5UTR-CDS'\n elif frag_start < CDS_end and frag_end > CDS_end:\n geneRegion = 'CDS-3UTR'\n elif frag_start >= CDS_end:\n geneRegion = '3UTR'\n elif frag_strand == '-':\n if frag_end <= CDS_start:\n geneRegion = '3UTR'\n elif frag_start < CDS_start and frag_end > CDS_start:\n geneRegion = 'CDS-3UTR'\n elif frag_start < CDS_end and frag_end > CDS_end:\n geneRegion = '5UTR-CDS'\n elif frag_start >= CDS_end:\n geneRegion = '5UTR'\n else:\n geneRegion = 'Null'\n # print (frag_start,frag_end,CDS_start,CDS_end,geneNA,geneRegion)\n#------------------------------------------------------------------------------ intersect of fragments interval and exons interval\n frag_intersect = interval_comp & trans[-1]\n interval_comp_length = sum([interval_comp[a].upper- interval_comp[a].lower for a in range(0,len(interval_comp))])\n # print (interval_comp)\n # print (frag_intersect)\n#------------------------------------------------------------------------------ fragments located in introns\n if frag_intersect == P.empty(): \n flag = 1\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),'intron',str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n else:\n if complete_info == 'complete':\n flag = 3\n #print interval_comp\n#------------------------------------------------------------------------------ reduce alignment noise\n frag_intersect_length = sum([frag_intersect[a].upper-frag_intersect[a].lower for a in range(0,len(frag_intersect))])\n absolute_diff = abs(frag_intersect_length-interval_comp_length)\n if absolute_diff == 0:\n#------------------------------------------------------------------------------ \n start_region = []\n length_region = []\n for region in frag_intersect:\n start_region.append(str(int(region.lower - frag_start)))\n length_region.append(str(int(region.upper - region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_region)),\\\n ','.join(length_region),','.join(start_region),str(frag_cov),flag,complete_info))\n else:\n start_region = []\n length_region = []\n 
for region in interval_comp:\n start_region.append(str(int(region.lower - frag_start)))\n length_region.append(str(int(region.upper - region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),geneNA,geneType,\\\n frag_strand,str(frag_start),str(frag_end),'intron-containing',str(len(start_region)),\\\n ','.join(length_region),','.join(start_region),str(frag_cov),flag,complete_info))\n else:\n #print interval_comp\n #print frag_intersect\n#------------------------------------------------------------------------------ fragments boundaries located in exons\n #print frag_intersect[0][0],frag_start,frag_intersect[-1][1],frag_end\n #print abs_position\n # print (P.closedopen(frag_start,frag_end),trans[-1])\n interval_update = P.closedopen(frag_start,frag_end) & trans[-1]\n # print (interval_update)\n frag_trans_length = sum([interval_update[a].upper-interval_update[a].lower for a in range(0,len(interval_update))])\n absolute_diff = abs(frag_trans_length-interval_comp_length)\n #print absolute_diff\n #print geneRegion\n #print interval_comp\n #print abs_position\n if absolute_diff <= 300: #insert sequence length <=200nt\n #print frag_trans_length,interval_comp_length\n #print geneRegion\n flag = 2\n start_out = []\n length_out = []\n for interval_region in list(interval_update):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n else:\n # print (trans)\n flag = 1\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),'intron-containing',str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n if flag == 0:\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic[(chr_no,frag_start,frag_end,frag_strand)] = [(chr_no,str(frag_start),str(frag_end),'intergenic','intergenic',frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info)]\n print ('Total treated fragments: ' + str(total_reads))\n return out_dic",
"def bond_checker(atom, dict, bond_dict):\n bound = []\n for item, values in dict.items():\n bond_range = check_bond_len(bond_dict, atom[0], values[\"element\"]) + 0.2\n if distance_checker(atom[1:], values[\"coor\"]) <= bond_range:\n bound.append(item)\n return bound",
"def is_in_hotspot(self):\r\n in_hotspot = False\r\n hotspots = parser.parse_hotspot_bed()\r\n \r\n if hotspots.get(self.chrom): \r\n chrom_hotspots = hotspots[self.chrom]\r\n \r\n for interval in chrom_hotspots: \r\n if interval[0] <= self.pos <= interval[1]:\r\n in_hotspot = True\r\n break\r\n \r\n return in_hotspot",
"def match_regions(self):\n l = []\n for r1 in self.regions_names():\n for r2 in self.metric.index:\n r11 = r1.replace('-', ' ').lower()\n r22 = r2.replace('-', ' ').lower()\n l.append([r1,r2,fuzz.ratio(r11, r22)])\n\n matched = np.array([x for x in l if x[2] > 80])\n\n return {key: value for (key, value) in matched[:,[1,0]]}",
"def finetune_intron_boundaries(abfgp_genemodel,introndata,\n array_algpresence,array_algsimilarity,verbose=True):\n\n # Global Variable Imports\n FINETUNE_ACCEPTOR_NT_OFFSET = 12\n FINETUNE_DONOR_NT_OFFSET = 12\n FINETUNE_ACCEPTOR_NT_OFFSET = 18\n FINETUNE_DONOR_NT_OFFSET = 18\n from settings.genestructure import MIN_INTRON_NT_LENGTH\n\n # list with adjusted boundaries\n refined_boundaries = []\n\n # recreate lists of ABGFP exons & introns\n abfgp_exons = [ abfgp_genemodel[pos] for pos in range(0,len(abfgp_genemodel),2) ]\n abfgp_introns = [ abfgp_genemodel[pos] for pos in range(1,len(abfgp_genemodel),2) ]\n\n for intron_pos in range(0,len(abfgp_introns)):\n intron = abfgp_introns[intron_pos]\n if not intron: continue\n if intron.__class__.__name__ == 'SequenceErrorConnectingOrfs': continue\n has_been_printed = False\n finetune_acceptor_range = range(intron.acceptor.pos-FINETUNE_ACCEPTOR_NT_OFFSET,\n intron.acceptor.pos+FINETUNE_ACCEPTOR_NT_OFFSET+1)\n finetune_donor_range = range(intron.donor.pos-FINETUNE_DONOR_NT_OFFSET,\n intron.donor.pos+FINETUNE_DONOR_NT_OFFSET+1)\n\n # assign branchpoint in current intron\n intron.assign_bp_and_ppts()\n\n # start searching acceptor based\n alternatives = []\n for acceptor in intron.orfAcceptor._acceptor_sites:\n if acceptor.pos != intron.acceptor.pos and\\\n acceptor.phase != intron.acceptor.phase and\\\n acceptor.pos in finetune_acceptor_range:\n # now see if we can find a donor for this phase too\n for donor in intron.orfDonor._donor_sites:\n if donor.pos != intron.donor.pos and\\\n donor.phase != intron.donor.phase and\\\n donor.phase == acceptor.phase and\\\n donor.pos in finetune_donor_range:\n # get the next exon (3'of this intron)\n next_exon = abfgp_exons[intron_pos+1]\n prev_exon = abfgp_exons[intron_pos]\n\n if not has_been_printed:\n has_been_printed = True\n ####################################################\n if verbose: print \"FINETUNING INTRON::\", intron\n ####################################################\n\n # get data on this alternative acceptor/donor combination\n test_intron = IntronConnectingOrfs(donor,acceptor,None,intron.orfDonor,intron.orfAcceptor)\n test_intron.assign_bp_and_ppts()\n\n # test if refinement will result in a long enough intron\n if test_intron.length < MIN_INTRON_NT_LENGTH: continue\n\n scorelist = []\n # score 1: is donor.pssm_score `higher`?\n scorelist.append( _finetune_splicesite_comparison(intron.donor,donor) )\n # score 2: is acceptor.pssm_score `higher`?\n scorelist.append( _finetune_splicesite_comparison(intron.acceptor,acceptor) )\n # score 3: branchpoint comparison?\n scorelist.append( _branchpoint_comparison(intron,test_intron) )\n # score 4: ppt comparison?\n scorelist.append( _polypyrimidinetract_comparison(intron,test_intron) )\n # score 5: is algsimilarity ratio increased (==better)?\n scorelist.append( _algsimilarity_comparison(intron,test_intron,prev_exon,next_exon,array_algsimilarity) )\n\n # evaluate scorelist; improved intron boundary or not?\n # use donor, acceptor, branchpoint & ppt, do *NOT* use algsim score\n if scorelist[0:4].count(False) == 0 and scorelist[0:4].count(True) >= 1:\n alternatives.append( ( donor, acceptor, scorelist ) )\n is_accepted = True\n else:\n is_accepted = False\n\n ########################################################\n if verbose:\n print \"alternatives:\", donor, acceptor,\n print intron.donor.pos - donor.pos,\n print intron.acceptor.pos - acceptor.pos,\n print scorelist, is_accepted,\n print \"BPcur:\",intron.get_branchpoint_nt_distance(),\n print 
\"alt:\",\n print test_intron.get_branchpoint_nt_distance()\n ########################################################\n\n # now evaluate the alternatived and take the best one\n if not alternatives:\n continue\n elif len(alternatives) == 1:\n refined_boundaries.append( ( intron.donor, alternatives[0][0] ) )\n refined_boundaries.append( ( intron.acceptor, alternatives[0][1] ) )\n else:\n # multiple! again, take the *best* one\n pass\n\n # return list of refined_boundaries\n return refined_boundaries",
"def ValidClusterRanges(self):\n for cluster_range in self.cluster_ranges:\n the_range = cluster_range.split(\"-\")\n print(f\"Checking that range {the_range} falls within our data area\")\n try:\n if int(the_range[0]) < self.low_data_cluster or int(the_range[1]) > self.high_data_cluster:\n print(f\"False. {the_range[0]} or {the_range[1]} is outside of our data area\")\n return False\n except TypeError as t_err:\n print(f\"Error. Range does not appear to be an int\")\n return False\n return True",
"def test_region_bounds_keys(i07_nexus: I07Nexus,\n region_number, kind, result):\n assert i07_nexus._get_region_bounds_key(region_number, kind) == result",
"def nanobot_in_range_of_whole_node(self, nanobot):\n for corner in itertools.product(*zip(self.mins, self.maxs)):\n if manhattan_dist(nanobot.coord, corner) > nanobot.r:\n return False\n\n return True",
"def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic",
"def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic",
"def calculate_overlap_ranges(network, reserved_network):\n if network.overlaps(reserved_network):\n ranges = []\n\n # If the lower boundary of current head is smaller than the lower boundary of reserved_network\n # It means the 'reserved_network' network is necessarily from 'the right' of head, and its available\n if network[0] < reserved_network[0]:\n ranges.append({'lower_ip': network[0], 'upper_ip': reserved_network[0] - 1, 'available': True})\n\n # Append the overlapping network as NOT available\n ranges.append({'lower_ip': reserved_network[0], 'upper_ip': reserved_network[-1], 'available': False})\n\n if reserved_network[-1] < network[-1]:\n ranges.append({'lower_ip': reserved_network[-1] + 1, 'upper_ip': network[-1], 'available': True})\n return ranges\n else:\n return [{'lower_ip': network[0], 'upper_ip': network[-1], 'available': True}]",
"def verify_aggWasterwaterPathway(self):\n self.c.execute('''SELECT aggCode, (aggC1+aggC2+aggPercWithoutTreatment)\n FROM Agglomerations\n WHERE (aggC1 + aggC2 + aggPercWithoutTreatment) != 100 \n AND aggState=1\n ''')\n res = self.c.fetchall()\n if (len(res) > 0):\n return [False,\n \"In the agglomeration '%s' aggC1 + aggC2 + aggPercWithoutTreatment is equal to must be equal '%s' instead of 100%%\",\n res]\n else:\n return [True]",
"def check_gtis(gti):\n if len(gti) < 1:\n raise ValueError(\"Empty GTIs.\")\n\n for g in gti:\n if np.size(g) != 2 or np.ndim(g) != 1:\n raise TypeError(\n \"Please check the formatting of the GTIs. They need to be\"\n \" provided as [[gti00, gti01], [gti10, gti11], ...].\"\n )\n\n gti = np.array(gti)\n gti_start = gti[:, 0]\n gti_end = gti[:, 1]\n\n # Check that GTIs are well-behaved\n if not np.all(gti_end >= gti_start):\n raise ValueError(\"The GTI end times must be larger than the \" \"GTI start times.\")\n\n # Check that there are no overlaps in GTIs\n if not np.all(gti_start[1:] >= gti_end[:-1]):\n raise ValueError(\"This GTI has overlaps.\")\n\n return",
"def find_valid_region(self, spat_rel):\n # The top-down view is transposed.\n if spat_rel == \"above\":\n is_valid = np.vectorize(lambda x: (x > 157.5) or (x <= -157.5))\n elif spat_rel == \"above-right\":\n is_valid = np.vectorize(lambda x: (x > -157.5) and (x <= -112.5))\n elif spat_rel == \"right\":\n is_valid = np.vectorize(lambda x: (x > -112.5 and x <= -67.5))\n elif spat_rel == \"below-right\":\n is_valid = np.vectorize(lambda x: (x > -67.5) and (x <= -22.5))\n elif spat_rel == \"below\":\n is_valid = np.vectorize(lambda x: (x > -22.5) and (x <= 22.5))\n elif spat_rel == \"below-left\":\n is_valid = np.vectorize(lambda x: (x > 22.5) and (x <= 67.5))\n elif spat_rel == \"left\":\n is_valid = np.vectorize(lambda x: (x > 67.5) and (x <= 112.5))\n elif spat_rel == \"above-left\":\n is_valid = np.vectorize(lambda x: (x > 112.5) and (x <= 157.5))\n return is_valid",
"def in_range(center_bot, nanobots):\n return [b for b in nanobots if center_bot.distance_to(b) <= center_bot.strength]",
"def range_8(configuration):\n range_dict_all = {\n # updated aLIGO design sensitivity range from 197.5 to 181.5 Mpc on 9 Apr 2018 to reflect T1800044-v4\n \"HL\" : {'H1' : 181.5, 'L1' : 181.5},\n \"HLV\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3 },\n \"HLVK\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, 'K1' : 160.0},\n \"HLVKI\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, 'K1' : 160.0, 'I1' : 181.5},\n \"GW170817\" : {'H1': 107/2.26 *1.26 , 'L1': 218/2.26, 'V1': 58/2.26}, # 1.26 is the improvement factor for H1's range due to data processing.\n \"GW170817_without_Virgo\" : {'H1': 107/2.26 *1.26 , 'L1': 218/2.26},\n \"GW170814\" : {'H1': 53, 'L1': 98, 'V1': 26}, # 1.26 is the improvement factor for H1's range due to data processing.\n \"design\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3 },\n \"early\" : {'H1' : 60., 'L1': 60.},\n \"half_ligo\" : {'H1' : 99, 'L1' : 99, 'V1': 128.3 },\n \"half_virgo\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 64 },\n \"nosrm\" : {'H1' : 159, 'L1' : 159, 'V1': 109 },\n \"india\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 },\n \"kagra\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 , \\\n \"K1\" : 160.0},\n \"bala\" : {'H1' : 181.5, 'H2' : 181.5, 'L1' : 181.5, 'V1': 128.3, \\\n \"I1\" : 181.5 , \"K1\" : 160.0},\n \"sa\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 , \\\n \"K1\" : 160.0, \"S1\":181.5},\n \"sa2\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 , \\\n \"K1\" : 160.0, \"S1\":181.5},\n \"steve\" : {'H1' : 160.0, 'L1' : 160.0, 'V1': 160.0, \"I1\" : 160.0 },\n \"s6vsr2\" : {'H1' : 20., 'L1' : 20., 'V1': 8. }\n }\n return(range_dict_all[configuration])"
] | [
"0.55903125",
"0.5488711",
"0.54818463",
"0.5360363",
"0.5360247",
"0.53407776",
"0.5336744",
"0.52925354",
"0.52756387",
"0.5243182",
"0.5230628",
"0.5215184",
"0.51692766",
"0.5168161",
"0.515725",
"0.515458",
"0.51419836",
"0.51233953",
"0.5118567",
"0.5113086",
"0.51112384",
"0.50867933",
"0.50756073",
"0.50756073",
"0.50533134",
"0.50399566",
"0.50362855",
"0.5034041",
"0.50322694",
"0.49954948"
] | 0.5631509 | 0 |
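A minimal sketch of the overlap-range idea seen in the calculate_overlap_ranges negative above, assuming only Python's standard ipaddress module (3.8+ for address arithmetic); the example networks are hypothetical placeholders.

import ipaddress

def overlap_ranges(network, reserved):
    # Simplified restatement of the calculate_overlap_ranges logic above:
    # split `network` into available / reserved sub-ranges around `reserved`.
    if not network.overlaps(reserved):
        return [{"lower_ip": network[0], "upper_ip": network[-1], "available": True}]
    ranges = []
    if network[0] < reserved[0]:
        ranges.append({"lower_ip": network[0], "upper_ip": reserved[0] - 1, "available": True})
    ranges.append({"lower_ip": reserved[0], "upper_ip": reserved[-1], "available": False})
    if reserved[-1] < network[-1]:
        ranges.append({"lower_ip": reserved[-1] + 1, "upper_ip": network[-1], "available": True})
    return ranges

net = ipaddress.ip_network("10.0.0.0/24")         # hypothetical network
reserved = ipaddress.ip_network("10.0.0.128/25")  # hypothetical reserved block
for r in overlap_ranges(net, reserved):
    status = "available" if r["available"] else "reserved"
    print(r["lower_ip"], "-", r["upper_ip"], status)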
Return all element names from ``MedicationKnowledgeDefinitional`` according to the specification, preserving the original sequence order. | def elements_sequence(cls):
return [
"id",
"extension",
"modifierExtension",
"definition",
"doseForm",
"intendedRoute",
"ingredient",
"drugCharacteristic",
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"code\",\n \"status\",\n \"author\",\n \"intendedJurisdiction\",\n \"name\",\n \"relatedMedicationKnowledge\",\n \"associatedMedication\",\n \"productType\",\n \"monograph\",\n \"preparationInstruction\",\n \"cost\",\n \"monitoringProgram\",\n \"indicationGuideline\",\n \"medicineClassification\",\n \"packaging\",\n \"clinicalUseIssue\",\n \"storageGuideline\",\n \"regulatory\",\n \"definitional\",\n ]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"treatmentIntent\",\n \"dosage\",\n \"administrationTreatment\",\n \"patientCharacteristic\",\n ]",
"def elementNames(self):\n nel = self.nElements()\n return map(self.elementName,range(nel))",
"def get_names(dep):\n res = [dep.name]\n return res",
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"basedOn\",\n \"status\",\n \"category\",\n \"code\",\n \"subject\",\n \"context\",\n \"effectiveDateTime\",\n \"effectivePeriod\",\n \"issued\",\n \"performer\",\n \"specimen\",\n \"result\",\n \"imagingStudy\",\n \"image\",\n \"conclusion\",\n \"codedDiagnosis\",\n \"presentedForm\",\n ]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"patient\",\n \"date\",\n \"authority\",\n \"recommendation\",\n ]",
"def get_names(self):\n return [doc['name'] for doc in self.vocab]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"vaccineCode\",\n \"targetDisease\",\n \"contraindicatedVaccineCode\",\n \"forecastStatus\",\n \"forecastReason\",\n \"dateCriterion\",\n \"description\",\n \"series\",\n \"doseNumberPositiveInt\",\n \"doseNumberString\",\n \"seriesDosesPositiveInt\",\n \"seriesDosesString\",\n \"supportingImmunization\",\n \"supportingPatientInformation\",\n ]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"regulatoryAuthority\",\n \"substitution\",\n \"schedule\",\n \"maxDispense\",\n ]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"basedOn\",\n \"partOf\",\n \"status\",\n \"patient\",\n \"type\",\n \"suppliedItem\",\n \"occurrenceDateTime\",\n \"occurrencePeriod\",\n \"occurrenceTiming\",\n \"supplier\",\n \"destination\",\n \"receiver\",\n ]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"description\",\n \"definitionReference\",\n \"definitionCanonical\",\n \"definitionCodeableConcept\",\n \"definitionExpression\",\n \"method\",\n \"device\",\n \"exclude\",\n \"timeFromStart\",\n \"groupMeasure\",\n ]",
"def furanose_names(self):\n output = set()\n for item in self.monomers():\n if item in self.furanose_fac:\n output.add(self.furanose_fac[item][\"name\"])\n return list(output)",
"def get_specification_kinds(specifications):\n specifications.setdefault(\"manual event models\", {\"tags\": [\"manual event models\"]})\n return [\"manual event models\"]",
"def _generate_expanded_column_names(self):\n\n names = []\n # Get names of the descriptors\n des_names = [column for column in self.descriptor_dataframe][1:]\n\n # Generate expanded descriptor names for each compound\n for i in range(self.total_compounds):\n for des_name in des_names:\n name = 'compund_{}_{}'.format(i, des_name)\n names.append(name)\n\n return names",
"def seqNames(self):\n\t\tseqDict = self.sequenceDict()\n\t\treturn seqDict.keys()",
"def elements_sequence(cls):\n return [\"id\", \"extension\", \"modifierExtension\", \"indication\", \"dosingGuideline\"]",
"def names(self):\n return list(item.name for item in self.mechanisms)",
"def elements_sequence(cls):\n return [\"id\", \"extension\", \"modifierExtension\", \"quantity\", \"period\"]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"instantiatesCanonical\",\n \"instantiatesUri\",\n \"basedOn\",\n \"priorRequest\",\n \"groupIdentifier\",\n \"status\",\n \"intent\",\n \"priority\",\n \"codeReference\",\n \"codeCodeableConcept\",\n \"parameter\",\n \"subject\",\n \"encounter\",\n \"occurrenceDateTime\",\n \"occurrencePeriod\",\n \"occurrenceTiming\",\n \"authoredOn\",\n \"requester\",\n \"performerType\",\n \"performer\",\n \"reasonCode\",\n \"reasonReference\",\n \"insurance\",\n \"supportingInfo\",\n \"note\",\n \"relevantHistory\",\n ]",
"def elementName(self,m):\n return _cantera.phase_getstring(self._phase_id,1,m)",
"def get_field_names(self, declared_fields, info):\n return self._requested_fields",
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"definitionUri\",\n \"definitionCanonical\",\n \"status\",\n \"partOf\",\n \"code\",\n \"subject\",\n \"context\",\n \"occurrenceDateTime\",\n \"occurrencePeriod\",\n \"occurrenceTiming\",\n \"performer\",\n \"performingOrganization\",\n \"requestingOrganization\",\n \"costCenter\",\n \"quantity\",\n \"bodysite\",\n \"factorOverride\",\n \"priceOverride\",\n \"overrideReason\",\n \"enterer\",\n \"enteredDate\",\n \"reason\",\n \"service\",\n \"productReference\",\n \"productCodeableConcept\",\n \"account\",\n \"note\",\n \"supportingInformation\",\n ]",
"def elements_sequence(cls):\n return [\"id\", \"extension\", \"modifierExtension\", \"type\", \"dosage\"]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"title\",\n \"requirement\",\n \"relatedData\",\n ]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"title\",\n \"requirement\",\n \"relatedData\",\n ]",
"def elements_sequence(cls):\n return [\"id\", \"extension\", \"modifierExtension\", \"type\", \"name\"]",
"def getNamesFromDescr(descr):\n i = getIter(descr)\n if not i:\n return\n\n try:\n item = i.next()\n while item:\n if isinstance(item[1], str):\n yield item[0]\n else:\n l = []\n for j in getNamesFromDescr(item[1]):\n l.append(j)\n r = (item[0], l)\n yield r\n item = i.next()\n except StopIteration:\n pass",
"def elements_sequence(cls):\n return [\"id\", \"extension\", \"modifierExtension\", \"cost\", \"packagedProduct\"]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"sequence\",\n \"text\",\n \"additionalInstruction\",\n \"patientInstruction\",\n \"timing\",\n \"asNeeded\",\n \"asNeededFor\",\n \"site\",\n \"route\",\n \"method\",\n \"doseAndRate\",\n \"maxDosePerPeriod\",\n \"maxDosePerAdministration\",\n \"maxDosePerLifetime\",\n ]",
"def variables_used (self) :\r\n\t\t## These names do not contain dimension specification (everything in brackets\r\n\t\t## that comes after a name is am array index - either the arry was declared\r\n\t\t## correctly or it is wrong anyway, there is no implicit declaration of arrays) !\r\n\r\n\t\tresult = []\r\n\r\n\t\tfor l in self.equ_lists :\r\n\t\t\tfor var_name in l :\r\n\t\t\t\tresult.append(var_name[0])\r\n\t\treturn result"
] | [
"0.5559592",
"0.55173665",
"0.54835874",
"0.54470605",
"0.5411243",
"0.53615123",
"0.53325474",
"0.53293043",
"0.5318013",
"0.5305079",
"0.5292543",
"0.5288954",
"0.52888376",
"0.5249178",
"0.5247725",
"0.52077264",
"0.5173604",
"0.51669055",
"0.5159579",
"0.515878",
"0.5155112",
"0.5149284",
"0.5135487",
"0.5122561",
"0.5122561",
"0.5114945",
"0.5096236",
"0.50799245",
"0.50748587",
"0.5058961"
] | 0.55813897 | 0 |
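A hedged sketch of how an elements_sequence classmethod like the one in the MedicationKnowledgeDefinitional record above might be consumed; the stand-in class, its attribute values, and the ordered_fields helper are illustrative assumptions, not the fhir.resources API.

class MedicationKnowledgeDefinitional:
    # Hypothetical stand-in; the list literal mirrors the record's document.
    @classmethod
    def elements_sequence(cls):
        return [
            "id", "extension", "modifierExtension", "definition",
            "doseForm", "intendedRoute", "ingredient", "drugCharacteristic",
        ]

def ordered_fields(resource):
    # Walk the declared sequence so output preserves the specification order.
    return [(name, getattr(resource, name, None)) for name in type(resource).elements_sequence()]

r = MedicationKnowledgeDefinitional()
r.id = "example-definitional"   # illustrative values only
r.doseForm = "tablet"
for name, value in ordered_fields(r):
    if value is not None:
        print(name, "=", value)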
Return all element names from ``MedicationKnowledgeIndicationGuideline`` according to the specification, preserving the original sequence order. | def elements_sequence(cls):
return ["id", "extension", "modifierExtension", "indication", "dosingGuideline"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def names(self):\n return list(item.name for item in self.mechanisms)",
"def get_specification_kinds(specifications):\n specifications.setdefault(\"manual event models\", {\"tags\": [\"manual event models\"]})\n return [\"manual event models\"]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"code\",\n \"status\",\n \"author\",\n \"intendedJurisdiction\",\n \"name\",\n \"relatedMedicationKnowledge\",\n \"associatedMedication\",\n \"productType\",\n \"monograph\",\n \"preparationInstruction\",\n \"cost\",\n \"monitoringProgram\",\n \"indicationGuideline\",\n \"medicineClassification\",\n \"packaging\",\n \"clinicalUseIssue\",\n \"storageGuideline\",\n \"regulatory\",\n \"definitional\",\n ]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"definition\",\n \"doseForm\",\n \"intendedRoute\",\n \"ingredient\",\n \"drugCharacteristic\",\n ]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"treatmentIntent\",\n \"dosage\",\n \"administrationTreatment\",\n \"patientCharacteristic\",\n ]",
"def getNametagJoints(self):\n # Not sure what the name is right now.\n return []",
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"patient\",\n \"date\",\n \"authority\",\n \"recommendation\",\n ]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"basedOn\",\n \"status\",\n \"category\",\n \"code\",\n \"subject\",\n \"context\",\n \"effectiveDateTime\",\n \"effectivePeriod\",\n \"issued\",\n \"performer\",\n \"specimen\",\n \"result\",\n \"imagingStudy\",\n \"image\",\n \"conclusion\",\n \"codedDiagnosis\",\n \"presentedForm\",\n ]",
"def get_parameters_with_expert_knowledge(self) -> List[str]:\n return sorted(list(set([p for p, _ in self.expert_knowledge])))",
"def get_names(lines): \n next = False \n names = []\n for line in lines:\n if next:\n if len(line) == 1:\n break\n else:\n tmp = line.split()\n names.append(tmp[1])\n if line.startswith('Sequences loaded ...'):\n next = True\n return names",
"def elementName(self,m):\n return _cantera.phase_getstring(self._phase_id,1,m)",
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"basedOn\",\n \"partOf\",\n \"status\",\n \"patient\",\n \"type\",\n \"suppliedItem\",\n \"occurrenceDateTime\",\n \"occurrencePeriod\",\n \"occurrenceTiming\",\n \"supplier\",\n \"destination\",\n \"receiver\",\n ]",
"def getNamesFromDescr(descr):\n i = getIter(descr)\n if not i:\n return\n\n try:\n item = i.next()\n while item:\n if isinstance(item[1], str):\n yield item[0]\n else:\n l = []\n for j in getNamesFromDescr(item[1]):\n l.append(j)\n r = (item[0], l)\n yield r\n item = i.next()\n except StopIteration:\n pass",
"def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"description\",\n \"definitionReference\",\n \"definitionCanonical\",\n \"definitionCodeableConcept\",\n \"definitionExpression\",\n \"method\",\n \"device\",\n \"exclude\",\n \"timeFromStart\",\n \"groupMeasure\",\n ]",
"def get_column_names(self):\n names = []\n names.append(self.question_column + \"_agree_lot\")\n names.append(self.question_column + \"_agree_little\")\n names.append(self.question_column + \"_neither\")\n names.append(self.question_column + \"_dis_little\")\n names.append(self.question_column + \"_dis_lot\")\n return names",
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"instantiatesCanonical\",\n \"instantiatesUri\",\n \"basedOn\",\n \"priorRequest\",\n \"groupIdentifier\",\n \"status\",\n \"intent\",\n \"priority\",\n \"codeReference\",\n \"codeCodeableConcept\",\n \"parameter\",\n \"subject\",\n \"encounter\",\n \"occurrenceDateTime\",\n \"occurrencePeriod\",\n \"occurrenceTiming\",\n \"authoredOn\",\n \"requester\",\n \"performerType\",\n \"performer\",\n \"reasonCode\",\n \"reasonReference\",\n \"insurance\",\n \"supportingInfo\",\n \"note\",\n \"relevantHistory\",\n ]",
"def elementNames(self):\n nel = self.nElements()\n return map(self.elementName,range(nel))",
"def seqNames(self):\n\t\tseqDict = self.sequenceDict()\n\t\treturn seqDict.keys()",
"def get_names(self):\n return [doc['name'] for doc in self.vocab]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"vaccineCode\",\n \"targetDisease\",\n \"contraindicatedVaccineCode\",\n \"forecastStatus\",\n \"forecastReason\",\n \"dateCriterion\",\n \"description\",\n \"series\",\n \"doseNumberPositiveInt\",\n \"doseNumberString\",\n \"seriesDosesPositiveInt\",\n \"seriesDosesString\",\n \"supportingImmunization\",\n \"supportingPatientInformation\",\n ]",
"def itemnames():\n g = ['KIS_NA_39', 'VII_57', 'MX_48', 'MX_56', 'KIS_NA_42', 'VII_54',\n 'MX_S_48', 'MX_S_52', 'MX_52', 'KIS_NA_45', 'KIS_NA_51', 'MIP_45',\n 'MIP_49', 'MIP_52', 'MIP_plus_48', 'MIP_plus_51', 'MX_42', 'MX_45',\n 'MIP_G_42', 'KIS_42', 'KIS_NA_48']\n return(g)",
"def get_sequence_names(self):\r\n return Wiki().sequences_for_article_url(self.url).keys()",
"def _get_titles_dependencies_for_question(schema, question):\n dependencies = AnswerDependencies()\n\n answer_ids = schema.get_answer_ids_for_question(question['id'])\n\n when_clauses = [title.get('when')[0] for title in question.get('titles', []) if title.get('when')]\n\n for when_clause in when_clauses:\n when_id = when_clause.get('id')\n for answer_id in answer_ids:\n dependencies.add(when_id, answer_id)\n\n return dependencies",
"def names(self) -> list[str]:",
"def get_names(dep):\n res = [dep.name]\n return res",
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"url\",\n \"identifier\",\n \"version\",\n \"name\",\n \"title\",\n \"shortTitle\",\n \"subtitle\",\n \"status\",\n \"date\",\n \"description\",\n \"note\",\n \"useContext\",\n \"publisher\",\n \"contact\",\n \"author\",\n \"editor\",\n \"reviewer\",\n \"endorser\",\n \"relatedArtifact\",\n \"actual\",\n \"characteristicCombination\",\n \"characteristic\",\n \"handling\",\n \"category\",\n ]",
"def find_resonator_names(measurement):\n res_names = []\n for k in measurement.keys():\n if 'R' in k:\n res_names.append(k)\n return res_names",
"def get_representative_words(self, phi=None):\n phi = phi if phi is not None else self.phi\n for i in range(self.n_components):\n print(\"Topic\", i)\n c = np.argsort(self.phi[i, :])\n for j in c[-1:-11:-1]:\n print(self.list_ind2word[j], phi[i, j])",
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"instantiatesCanonical\",\n \"instantiatesUri\",\n \"basedOn\",\n \"replaces\",\n \"groupIdentifier\",\n \"status\",\n \"intent\",\n \"priority\",\n \"code\",\n \"subject\",\n \"encounter\",\n \"authoredOn\",\n \"author\",\n \"reason\",\n \"goal\",\n \"note\",\n \"action\",\n ]",
"def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"status\",\n \"issued\",\n \"applies\",\n \"subject\",\n \"topic\",\n \"authority\",\n \"domain\",\n \"type\",\n \"subType\",\n \"action\",\n \"actionReason\",\n \"decisionType\",\n \"contentDerivative\",\n \"securityLabel\",\n \"agent\",\n \"signer\",\n \"valuedItem\",\n \"term\",\n \"bindingAttachment\",\n \"bindingReference\",\n \"bindingReference\",\n \"bindingReference\",\n \"friendly\",\n \"legal\",\n \"rule\",\n ]"
] | [
"0.53641224",
"0.5295433",
"0.5286177",
"0.52009326",
"0.518853",
"0.5163249",
"0.5158666",
"0.51429915",
"0.5131586",
"0.5112963",
"0.51038444",
"0.50617266",
"0.50558203",
"0.50551116",
"0.5047262",
"0.50398177",
"0.5026788",
"0.50196815",
"0.49980664",
"0.4971098",
"0.49673674",
"0.4962161",
"0.49146202",
"0.4910852",
"0.48933667",
"0.48870376",
"0.48812035",
"0.48788184",
"0.48639396",
"0.48490056"
] | 0.5673163 | 0 |
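As a small illustration of the short element list in the MedicationKnowledgeIndicationGuideline record above, the sketch below separates the shared backbone fields from the guideline-specific ones; the list literal is copied from the record's document, the rest is assumed.

BACKBONE_FIELDS = {"id", "extension", "modifierExtension"}

def elements_sequence():
    # Copied from the record's positive document above.
    return ["id", "extension", "modifierExtension", "indication", "dosingGuideline"]

specific = [name for name in elements_sequence() if name not in BACKBONE_FIELDS]
print(specific)  # -> ['indication', 'dosingGuideline']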
Get the shortDOI for a DOI. Providing a cache dictionary will prevent multiple API requests for the same DOI. | def shorten(doi, cache={}, verbose=False):
if doi in cache:
return cache[doi]
quoted_doi = urllib.request.quote(doi)
url = 'http://shortdoi.org/{}?format=json'.format(quoted_doi)
try:
response = requests.get(url).json()
short_doi = response['ShortDOI']
except Exception as e:
if verbose:
print(doi, 'failed with', e)
return None
cache[doi] = short_doi
return short_doi | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_short_doi_url(doi):\n quoted_doi = urllib.request.quote(doi)\n url = 'http://shortdoi.org/{}?format=json'.format(quoted_doi)\n headers = {\n 'User-Agent': get_manubot_user_agent(),\n }\n try:\n response = requests.get(url, headers=headers).json()\n short_doi = response['ShortDOI']\n short_url = 'https://doi.org/' + short_doi[3:] # Remove \"10/\" prefix\n return short_url\n except Exception:\n logging.warning(f'shortDOI lookup failed for {doi}', exc_info=True)\n return None",
"def doi_info(self,doi):\n \n doi = _clean_doi(doi)\n \n url = self.BASE_URL + 'works/' + doi\n \n try:\n return self._make_get_request(url,models.work_single)\n except errors.RequestError:\n #TODO: Check for 404\n #last_response.status_code\n #TODO: Do this only if debugging is enabled\n if self.debug:\n #TODO: Also report code\n print(\"Error msg from server: \" + self.last_response.text)\n raise errors.InvalidDOI('Invalid DOI requested: ' + doi)\n \n #return self._make_get_request(url,models.Work,kwargs)",
"def get_bibtex_entry(doi, bibtext_cache={}, shortdoi_cache={}):\r\n bibtext = get_bibtext(doi, cache = bibtext_cache)\r\n if not bibtext:\r\n return None\r\n\r\n short_doi = shorten(doi, cache = shortdoi_cache)\r\n parser = BibTexParser()\r\n parser.ignore_nonstandard_types = False\r\n bibdb = bibtexparser.loads(bibtext, parser)\r\n entry, = bibdb.entries\r\n quoted_doi = urllib.request.quote(doi)\r\n entry['link'] = 'https://doi.org/{}'.format(quoted_doi)\r\n if 'author' in entry:\r\n entry['author'] = ' and '.join(entry['author'].rstrip(';').split('; '))\r\n entry['ID'] = short_doi[3:]\r\n return entry",
"def expand_short_doi(short_doi):\n if not short_doi.startswith('10/'):\n raise ValueError(f'shortDOIs start with `10/`, but expand_short_doi received: {short_doi}')\n url = f'https://doi.org/api/handles/{short_doi.lower()}'\n params = {\n \"type\": \"HS_ALIAS\",\n }\n response = requests.get(url, params=params)\n # response documentation at https://www.handle.net/proxy_servlet.html\n results = response.json()\n response_code = results.get('responseCode') # Handle protocol response code\n if response_code == 100:\n raise ValueError(f'Handle not found. Double check short_doi: {short_doi}')\n if response_code == 200:\n raise ValueError(f'HS_ALIAS values not found. Double check short_doi: {short_doi}')\n if response_code != 1:\n raise ValueError(f'Error response code of {response_code} returned by {response.url}')\n values = results.get('values', [])\n for value in values:\n if value.get('type') == 'HS_ALIAS':\n doi = value['data']['value']\n return doi.lower()\n raise RuntimeError(\n f'HS_ALIAS value not found by expand_short_doi(\"{short_doi}\")\\n'\n f'The following JSON was retrieved from {response.url}:\\n'\n + json.dumps(results, indent=2)\n )",
"def _get_tagged_doc(self, doi):\n\n return self.tagged_docs[list(map(lambda x: x.tags[0], self.tagged_docs)).index(doi)]",
"def resolve(self, doi, check_doi=True, whitespace=False, skip_cache=False):\n if doi is None or doi.strip()=='':\n raise BadDOI('DOI cannot be None or empty string')\n\n if check_doi:\n doi = self.check_doi(doi, whitespace=whitespace)\n \n url = None\n if not skip_cache:\n url = self._query_cache(doi)\n\n if url == None:\n url = self._query_api(doi)\n\n if self._cache:\n cache_key = self._make_cache_key(doi)\n self._cache[cache_key] = url\n self._log.info('cached results for key {cache_key} ({doi}) '.format(\n cache_key=cache_key, doi=doi))\n return url",
"def get_dataset_doi(dataset):\n token = get_access_token()\n\n dataset_id = get_dataset_id(token, dataset)\n try:\n doi_request = requests.get(f\"{PENNSIEVE_URL}/datasets/{dataset_id}/doi\", headers=create_request_headers(token))\n if doi_request.status_code == 404:\n return {\"doi\": \"No DOI found for this dataset\"}\n doi_request.raise_for_status()\n return {\"doi\": doi_request.json()[\"doi\"]}\n except Exception as e:\n if type(e).__name__ == \"HTTPError\":\n abort(400, e.response.json()[\"message\"])\n abort(500, \"An internal server error prevented the request from being fulfilled. Please try again later.\")",
"def get_doi(self):\n\n return self._dois",
"def _get_doi_metadata(identifier_doi) -> Dict:\n\n record_metadata = None\n record = search_record_by_doi(identifier_doi)\n\n if record:\n record_metadata = _metadata_builder(record[0], scheme=\"doi\")\n record_metadata[\"url\"] = f\"/records/{record[0]['id']}\"\n\n return record_metadata",
"def doi(self):\n return LiteratureReader(self.record).doi",
"def resolveDoi(doi):\n logging.debug('Resolving DOI %s' % doi)\n doiUrl = 'https://doi.org/' + urllib.quote(doi.encode('utf8'))\n page = httpGetDelay(doiUrl)\n trgUrl = page['url']\n logging.debug('DOI %s redirects to %s' % (doi, trgUrl))\n return trgUrl",
"def reserve_dataset_doi(dataset): # sourcery skip: extract-method\n token = get_access_token()\n\n dataset_id = get_dataset_id(token, dataset)\n\n try:\n doi_request = requests.post(f\"{PENNSIEVE_URL}/datasets/{dataset_id}/doi\", headers=create_request_headers(token))\n doi_request.raise_for_status()\n return {\"doi\": doi_request.json()[\"doi\"]}\n except Exception as e:\n print(e)\n if type(e).__name__ == \"HTTPError\":\n abort(400, e.response.json()[\"message\"])\n abort(500, \"An internal server error prevented the request from being fulfilled. Please try again later.\")",
"def get_datapublisher_from_doi(doi):\n\n r = requests.get(f\"https://api.datacite.org/dois/{doi}\")\n record = r.json()\n\n if r.status_code != 200:\n raise DataCiteError(record[\"errors\"][0][\"title\"])\n\n return record[\"data\"][\"attributes\"][\"publisher\"]",
"def DOIs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('dois', default)\n return [HEP.DOIObject(i) for i in tmp]",
"def resolveDoiWithSfx(sfxServer, doi):\n logging.debug('Resolving doi %s with SFX' % doi)\n xmlQuery = '%s/SFX_API/sfx_local?XML=<?xml version=\"1.0\" ?><open-url><object_description><global_identifier_zone><id>doi:%s</id></global_identifier_zone><object_metadata_zone><__service_type>getFullTxt</__service_type></object_metadata_zone></object_description></open-url>' % (sfxServer, str(doi))\n return resolveWithSfx(sfxServer, xmlQuery)",
"def get_ddi(self, doi, format='ddi'):\n response = self.api.get_dataset_export(doi, format)\n return DDI(response.content)",
"def get_object(self, docobj):\n if docobj is None:\n return None\n return self._docmap.get(docobj)",
"def doi2json(doi):\n if \"arxiv\" in doi:\n print(\"This script does not yet support arXiv.\")\n sys.exit(2)\n else:\n url = \"https://dx.doi.org/\" + doi\n\n headers = {\"accept\": \"application/json\"}\n r = requests.get(url, headers = headers)\n\n if repr(r)==\"<Response [200]>\": #success!\n #handle potential encoding errors\n rtxt = clean_txt(r.text)\n try:\n j = json.loads(rtxt)\n except Exception as e:\n print(e)\n print(r)\n print(repr(rtxt))\n print(\"Error, DOI {} not found.\".format(doi))\n sys.exit(2)\n elif repr(r)==\"<Response [503]>\":\n print(\"Error 503: DOI service currently unavailable, try again in a bit.\")\n sys.exit(2)\n elif repr(r)==\"<Response [404]>\":\n print(\"Error 404: Resource {} not found\".format(url))\n sys.exit(2)\n else:\n print(\"Error {}.\".format(repr(r)))\n sys.exit(2)\n\n return j",
"def createDic(doc):\n\tlat = doc[0]\n\tlon = doc[1]\n\turl = doc[2]\n\tboilerpipe = doc[3]\n\tloc = {\"type\": \"Point\", \"coordinates\": [lon, lat]}\n\treturn {'url': url, 'boilerpipe': boilerpipe, 'loc': loc}",
"def GetDoctorFromID(self, iID):\n\t\tif iID not in self.ClientsMap: \n\t\t\tprint \"DentalDatabase::GetDoctorFromID: Requested doctor ID not found in database\"\n\t\t\treturn None \n\t\treturn self.ClientsMap[iID]",
"def get_doc(self, dtype, identity):\n if dtype == 'pii':\n doc = FullDoc(sd_pii = identity)\n elif dtype == 'doi':\n doc= FullDoc(doi = identity)\n\n if doc.read(ElsClient(self.API_list[0])):\n pass\n else:\n print (\"Read document failed.\")\n\n return doc",
"def find_cited_by(doi):\n if doi is None:\n return None\n\n citations = []\n if doi:\n response = requests.get(f\"https://opencitations.net/index/api/v1/citations/{doi}\").json()\n if response:\n citations = [{\"doi\": r['citing'].replace(\"coci =>\", \"\")} for r in response]\n if citations:\n return citations\n else:\n return None",
"def get_details(disease):\n\treturn d_desc_map[disease]",
"def get(self, osti_id: str) -> Union[ExplorerGetJSONResponseModel, None]:\n payload = {\"osti_id\": osti_id}\n r = requests.get(\n url=self.config.endpoint,\n auth=(self.config.username, self.config.password),\n params=payload,\n )\n if r.status_code == 200:\n if r.content == b\"[]\":\n return None\n else:\n content = json.loads(r.content)[0]\n return ExplorerGetJSONResponseModel.parse_obj(content)\n else:\n raise HTTPError(f\"Query for OSTI ID = {osti_id} failed\")",
"def getDOIFromCitation(citation):\n try:\n if \".org/\" in citation:\n DOI = citation.split(\".org/\")[1]\n elif citationContainsDOI(citation):\n DOI = citation.split(\"doi:\")[1]\n DOI = DOI.replace(\"]\", \"\")\n elif citation == \"unknown\":\n DOI = \"unknown\"\n else:\n DOI = citation\n # DOIs are case insensitive but lower-case seems to be preferred and is what's used by manubot\n DOI = DOI.lower()\n return DOI\n except:\n return \"unknown\"",
"def cached_WikidataItem_Q42():\n return m.WikidataItem(\"Q42\")",
"def load_registry_from_doi(self):\n\n # Ensure that this is indeed a DOI-based pooch\n downloader = choose_downloader(self.base_url)\n if not isinstance(downloader, DOIDownloader):\n raise ValueError(\n f\"Invalid base_url '{self.base_url}': \"\n + \"Pooch.load_registry_from_doi is only implemented for DOIs\"\n )\n\n # Create a repository instance\n doi = self.base_url.replace(\"doi:\", \"\")\n repository = doi_to_repository(doi)\n\n # Call registry population for this repository\n return repository.populate_registry(self)",
"def get_url_from_doi(doi):\n\n try:\n r = requests.head(f\"https://doi.org/{doi}\", allow_redirects=True)\n except requests.exceptions.ConnectionError:\n return None\n\n return r.url",
"def search_doi(self, soup):\n doi_tag = soup.find(\"link\", {\"title\": \"doi\"})\n if doi_tag:\n self.log.warning(f\"article has doi: {doi_tag['href']}\")",
"def DOIs(self, default=[None]):\n return self.data.get('dois', default)"
] | [
"0.6388651",
"0.5892598",
"0.5868604",
"0.5838415",
"0.58303165",
"0.56667435",
"0.5500369",
"0.5332493",
"0.5332032",
"0.5264312",
"0.52386665",
"0.5169312",
"0.5138275",
"0.5088207",
"0.5036826",
"0.50280404",
"0.5017073",
"0.49617666",
"0.49116126",
"0.4906788",
"0.4874662",
"0.4772827",
"0.47223696",
"0.47179258",
"0.4710937",
"0.4700474",
"0.4690708",
"0.46472615",
"0.46444422",
"0.45781496"
] | 0.76168287 | 0 |
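A usage sketch for the shorten helper in the shortDOI record above, assuming that function and its requests import are available; the DOI value is a hypothetical placeholder and the call goes out to shortdoi.org.

cache = {}

doi = "10.1038/nphys1170"                        # hypothetical example DOI
first = shorten(doi, cache=cache, verbose=True)  # performs the HTTP lookup
second = shorten(doi, cache=cache)               # served from the shared cache dict
assert first == second
print(first)

Passing an explicit dict, as here, avoids relying on the function's mutable default argument, so unrelated call sites do not share cached lookups.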
Return a bibtexparser entry for a DOI | def get_bibtex_entry(doi, bibtext_cache={}, shortdoi_cache={}):
bibtext = get_bibtext(doi, cache = bibtext_cache)
if not bibtext:
return None
short_doi = shorten(doi, cache = shortdoi_cache)
parser = BibTexParser()
parser.ignore_nonstandard_types = False
bibdb = bibtexparser.loads(bibtext, parser)
entry, = bibdb.entries
quoted_doi = urllib.request.quote(doi)
entry['link'] = 'https://doi.org/{}'.format(quoted_doi)
if 'author' in entry:
entry['author'] = ' and '.join(entry['author'].rstrip(';').split('; '))
entry['ID'] = short_doi[3:]
return entry | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_tagged_doc(self, doi):\n\n return self.tagged_docs[list(map(lambda x: x.tags[0], self.tagged_docs)).index(doi)]",
"def doi(self):\n return LiteratureReader(self.record).doi",
"def getDOIFromCitation(citation):\n try:\n if \".org/\" in citation:\n DOI = citation.split(\".org/\")[1]\n elif citationContainsDOI(citation):\n DOI = citation.split(\"doi:\")[1]\n DOI = DOI.replace(\"]\", \"\")\n elif citation == \"unknown\":\n DOI = \"unknown\"\n else:\n DOI = citation\n # DOIs are case insensitive but lower-case seems to be preferred and is what's used by manubot\n DOI = DOI.lower()\n return DOI\n except:\n return \"unknown\"",
"def parseTodoEntry(entry, wikiDocument=None):\r\n return None",
"def parse_doi(pubmed_article):\n medline = pubmed_article.find(\"MedlineCitation\")\n article = medline.find(\"Article\")\n elocation_ids = article.findall(\"ELocationID\")\n\n if len(elocation_ids) > 0:\n for e in elocation_ids:\n doi = e.text.strip() or \"\" if e.attrib.get(\"EIdType\", \"\") == \"doi\" else \"\"\n else:\n article_ids = pubmed_article.find(\"PubmedData/ArticleIdList\")\n if article_ids is not None:\n doi = article_ids.find('ArticleId[@IdType=\"doi\"]')\n doi = (\n (doi.text.strip() if doi.text is not None else \"\")\n if doi is not None\n else \"\"\n )\n else:\n doi = \"\"\n return doi",
"def get_datapublisher_from_doi(doi):\n\n r = requests.get(f\"https://api.datacite.org/dois/{doi}\")\n record = r.json()\n\n if r.status_code != 200:\n raise DataCiteError(record[\"errors\"][0][\"title\"])\n\n return record[\"data\"][\"attributes\"][\"publisher\"]",
"def __init__(self, doi='', reference=''):\n\n self.doi = doi\n self.reference = reference",
"def make_bibtex(self):\n\n\t\t# bib = requests.request('GET', 'http://dx.doi.org/' + self.doi, ",
"def doi2json(doi):\n if \"arxiv\" in doi:\n print(\"This script does not yet support arXiv.\")\n sys.exit(2)\n else:\n url = \"https://dx.doi.org/\" + doi\n\n headers = {\"accept\": \"application/json\"}\n r = requests.get(url, headers = headers)\n\n if repr(r)==\"<Response [200]>\": #success!\n #handle potential encoding errors\n rtxt = clean_txt(r.text)\n try:\n j = json.loads(rtxt)\n except Exception as e:\n print(e)\n print(r)\n print(repr(rtxt))\n print(\"Error, DOI {} not found.\".format(doi))\n sys.exit(2)\n elif repr(r)==\"<Response [503]>\":\n print(\"Error 503: DOI service currently unavailable, try again in a bit.\")\n sys.exit(2)\n elif repr(r)==\"<Response [404]>\":\n print(\"Error 404: Resource {} not found\".format(url))\n sys.exit(2)\n else:\n print(\"Error {}.\".format(repr(r)))\n sys.exit(2)\n\n return j",
"def func(item):\n\t\textract = lambda regex: re.search(regex, item).group(1) if regex else None\n\t\ttry:\n\t\t\tauthors = extract(authors_regex)\n\t\t\ttitle = extract(title_regex)\n\t\t\tyear = extract(year_regex)\n\t\t\tauthor_list = [author.strip() for author in authors.split(author_sep)]\n\t\t\treturn BibItem(author_list, title, year)\n\t\texcept Exception as e:\n\t\t\tprint(\"WARNING: Could not parse item: \\n\" + item)\n\t\t\tprint(\"Error was: \", e)",
"def process_bib_entry(\n cid, bibdatabase, bibnums, fallback_fmt=\"[{author_abbrev}, {year}]\"\n):\n entry = bibdatabase[cid]\n if cid not in bibnums:\n bibnums[cid] = len(bibnums) + 1\n\n if \"doi\" in entry:\n return r'<a href=\"https://doi.org/{doi}\">{text}</a>'.format(\n doi=entry[\"doi\"], text=bibnums[cid]\n )\n elif \"url\" in entry:\n return r'<a href=\"{url}\">{text}</a>'.format(url=entry[\"url\"], text=bibnums[cid])\n elif \"link\" in entry:\n return r'<a href=\"{url}\">{text}</a>'.format(\n url=entry[\"link\"], text=bibnums[cid]\n )\n else:\n return bibnums[cid]\n # add_abbreviated_author(entry)\n # split_date(entry)\n # return DefaultFormatter().format(fallback_fmt, **entry)",
"def getCitationFromIssue(issue):\n try:\n if \"\\nCitation: \" in issue[\"body\"]:\n citation = issue[\"body\"].split(\"\\nCitation: \")[1].split(\" \")[0]\n else:\n afterDOI = issue[\"body\"].split(\"DOI:\")[1]\n citation = afterDOI.split(\" \")[0]\n if citation == \"\":\n citation = afterDOI.split(\" \")[1]\n if \"\\r\\n\" in citation:\n citation = citation.split(\"\\r\\n\")[0]\n if citation.startswith(\"@\"):\n citation = citation[1:]\n return citation\n\n except:\n print(\n \"the citation could not be automatically extracted from the following issue: \\n\",\n issue[\"title\"])\n return \"unknown\"",
"def doi_info(self,doi):\n \n doi = _clean_doi(doi)\n \n url = self.BASE_URL + 'works/' + doi\n \n try:\n return self._make_get_request(url,models.work_single)\n except errors.RequestError:\n #TODO: Check for 404\n #last_response.status_code\n #TODO: Do this only if debugging is enabled\n if self.debug:\n #TODO: Also report code\n print(\"Error msg from server: \" + self.last_response.text)\n raise errors.InvalidDOI('Invalid DOI requested: ' + doi)\n \n #return self._make_get_request(url,models.Work,kwargs)",
"def main(argv):\n if len(argv) == 0 or argv == None:\n print(HELP_TXT)\n sys.exit(2)\n try:\n opts, args = getopt.getopt(argv,\"dhmb:\",[\"debug\", \"help\", \"markdown\", \"biblio=\"])\n except getopt.GetoptError:\n print(HELP_TXT)\n sys.exit(2)\n\n testing = False\n markdown = False\n bibtex_filename = \"library.bib\"\n \n for opt, arg in opts:\n if opt in ('-h', '--help'):\n print(HELP_TXT)\n sys.exit()\n elif opt in ('-d', '--debug'):\n testing = True\n print(\"Fetching of DOI and printing metadata, but not adding to bibtex or creating markdown.\")\n elif opt in ('-b', '--biblio'):\n bibtex_filename = arg\n elif opt in ('-m', '--markdown'):\n markdown = True\n print(\"Writing entry to bibtex file but not creating a markdown file.\")\n\n if len(args) == 0:\n print(\"Error: At least one DOI must be provided.\")\n sys.exit(2)\n\n for doi in args:\n #Get the metadata from crossref.org\n res = doi2json(doi)\n \n #extract\n meta = extract_metadata(res)\n \n #check if reference exists\n citation_found = True\n letters = list(string.ascii_lowercase)\n try:\n with open(bibtex_filename, 'r') as fobj:\n text = fobj.read().strip()\n tmp_key = meta['citationkey']\n for letter in letters:\n if tmp_key in text:\n tmp_key = meta['citationkey'] + letter\n else:\n meta['citationkey'] = tmp_key\n break\n except FileNotFoundError:\n pass\n\n #Then write if not testing\n if testing:\n print(\"DOI RESULTS:\")\n for k,v in res.items():\n if type(v) is list:\n print(k + ': ' + str(v) + '\\n')\n else:\n print(k + ': \"' + str(v) + '\"\\n')\n print(\"PARSED METADATA:\")\n for k,v in meta.items():\n if type(v) is list:\n print(k + ': ' + str(v) + '\\n')\n else:\n print(k + ': \"' + str(v) + '\"\\n')\n else:\n if not os.path.isfile(bibtex_filename):\n print(\"Bibtex file {} does not exist, creating it.\".format(bibtex_filename))\n\n #Add the metadata to the bibtex reference\n with open(bibtex_filename, \"a+\") as f:\n s = \",\".join([\n \"\\n@article{\" + meta['citationkey'],\n \"\\nauthor = {\" + \" and \".join(meta['authors']) + \"}\",\n \"\\ndoi = {\" + meta['doi'] + \"}\",\n \"\\nissn = {\" + meta['issn'] + \"}\",\n \"\\njournal = {\" + meta['container'] + \"}\",\n \"\\npublisher = {\" + meta['publisher'] + \"}\",\n \"\\nnumber = {\" + meta['issue'] + \"}\",\n \"\\npages = {\" + meta['pages'] + \"}\",\n \"\\ntitle = {\" + meta['source-title'] + \"}\",\n \"\\nurl = {\" + meta['link'] + \"}\",\n \"\\nvolume = {\" + meta['volume'] + \"}\",\n \"\\nyear = {\" + meta['year'] + \"}\\n}\\n\\n\"\n ])\n f.write(s)\n \n print(\"reference {} from {} added to {}!\\n\".format(meta['citationkey'], meta['container'], bibtex_filename))\n \n # create the markdown notes file\n if markdown:\n subject_dir = re.sub(r'\\([^)]*\\)', '', meta['subjects'][0]).strip().lower().replace(\" \", \"_\").replace(\",\",\"\")\n md_dir = \"/\".join([HOME_DIR, REPO_DIR, MD_DIR, subject_dir])\n os.mkdir(md_dir) if os.path.isdir(md_dir) == False else None\n filename = \"/\".join([md_dir, meta['citationkey'] + \".md\"])\n with open(filename, \"w\") as f:\n f.write(\"---\\nlayout: mathpost\\n\")\n for k,v in meta.items():\n try:\n if type(v) is list:\n f.write(k + ': ' + str(v) + '\\n')\n else:\n f.write(k + ': \"' + str(v) + '\"\\n')\n except UnicodeEncodeError as e:\n print(\"Unicode Error. Some character(s) may be wrong.\")\n print(meta.keys())\n print(repr(k) + \": \" + repr(v))\n print(e)\n \n f.write(\"---\\n\\n\")\n citation = \"**Citation:** \" \\\n + meta['citation-authors'] + ' \"' \\\n + meta['source-title'] + '\". 
' \\\n + '*' + meta['container'] + '*'\n citation = citation + \" \" + meta['volume'] if meta['volume'] != '' else citation\n citation = citation + \", no. \" + meta['issue'] if meta['issue'] != '' else citation\n citation = citation + \" (\" + meta['year'] + \")\" if meta['year'] != '' else citation\n citation = citation + \": \" + meta['pages'] if meta['pages'] != '' else citation\n \n f.write(citation + \". [[Paper link](\" + meta['link'] + \")]\")\n \n #add the reference to the \"reading_list.md\" file\n with open(\"/\".join([HOME_DIR, REPO_DIR, MD_DIR, \"reading_list.md\"]), \"a\") as f:\n f.write(\"* [ ] **\" + meta['citationkey'] \\\n + \"**: (\" \\\n + re.sub(r'\\([^)]*\\)', '', meta['subjects'][0]).strip() \\\n + \") \" + meta['link'] + \"\\n\")\n \n print(\"reference {} added in {}!\\n\".format(meta['citationkey'], md_dir))",
"def search_doi(self, soup):\n doi_tag = soup.find(\"link\", {\"title\": \"doi\"})\n if doi_tag:\n self.log.warning(f\"article has doi: {doi_tag['href']}\")",
"def _get_doi_metadata(identifier_doi) -> Dict:\n\n record_metadata = None\n record = search_record_by_doi(identifier_doi)\n\n if record:\n record_metadata = _metadata_builder(record[0], scheme=\"doi\")\n record_metadata[\"url\"] = f\"/records/{record[0]['id']}\"\n\n return record_metadata",
"def get_bibtex(self, osti_id: str) -> Union[str, None]:\n payload = {\"osti_id\": osti_id}\n header = {\"Accept\": \"application/x-bibtex\"}\n try:\n r = requests.get(\n url=self.config.endpoint,\n auth=(self.config.username, self.config.password),\n params=payload,\n headers=header,\n )\n except Exception:\n raise HTTPError(f\"Failed to request for OSTI ID = {osti_id}\")\n if r.status_code == 200:\n if r.content.decode() == \"\":\n return None\n return r.content.decode()\n else:\n raise HTTPError(f\"Query for OSTI ID = {osti_id} failed\")",
"def resolveDoi(doi):\n logging.debug('Resolving DOI %s' % doi)\n doiUrl = 'https://doi.org/' + urllib.quote(doi.encode('utf8'))\n page = httpGetDelay(doiUrl)\n trgUrl = page['url']\n logging.debug('DOI %s redirects to %s' % (doi, trgUrl))\n return trgUrl",
"def getdoc():\n\n\timport webnotes\n\tfrom webnotes.utils import cint\n\t\n\tform = webnotes.form_dict\n\tdoctype, docname = form.get('doctype'), form.get('name')\n\tprefix = cint(form.get('from_archive')) and 'arc' or 'tab'\n\n\tif not (doctype and docname):\n\t\traise Exception, 'doctype and name required!'\n\t\n\tdoclist = []\n\t# single\n\tdoclist = load_single_doc(doctype, docname, (form.get('user') or webnotes.session['user']), prefix)\n\t\n\t# load doctype along with the doc\n\tif form.get('getdoctype'):\n\t\timport webnotes.model.doctype\n\t\tdoclist += webnotes.model.doctype.get(doctype)\n\n\t# tag as archived\n\tif prefix == 'arc':\n\t\tdoclist[0].__archived=1\n\n\twebnotes.response['docs'] = doclist",
"def parse_bibtex(self, data: str) -> Dict:\n\n new_bib = [line for line in data.splitlines() if \"= ,\" not in line]\n new_bib = \"\\n\".join(new_bib)\n bib_db: bibtexparser.bibdatabase.BibDatabase = bibtexparser.loads(new_bib)\n result = dict()\n for entry in bib_db.entries:\n osti_id = entry[\"ID\"].split(\"_\")[1]\n result[osti_id] = entry\n return result",
"def parse_doi(iso_xml):\n tree = ET.parse('iso.xml')\n root = tree.getroot()\n doi_el = root.findall(\n '.gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/'\n 'gmd:CI_Citation/gmd:identifier/gmd:MD_Identifier/gmd:code/'\n 'gco:CharacterString', NS_DICT\n )[0]\n return doi_el.text.strip()",
"def test_paper_doi(self, doi_dataset, doi_bib, orcid, metadata_parser):\n apply_mock(doi_dataset, doi_bib, orcid, metadata_parser)\n os.chdir(\"input/\")\n _set_args(\"-i\",\"yamls/paper_doi.yaml\",\"-o\",\"../out\",\"-ns\")\n with HiddenPrints():\n ya2ro.main()",
"def get_abstract(doi):\n xml = download_article(doi)\n et = ET.fromstring(xml)\n coredata = et.find('article:coredata', elsevier_ns)\n abstract = coredata.find('dc:description', elsevier_ns)\n abs_text = abstract.text\n return abs_text",
"def read_ncbi_disease(flo):\n documents = []\n current_PMID, title, abstract, annotations = None, None, None, []\n for ln, line in enumerate(flo, start=1):\n line = line.rstrip('\\n')\n if not line:\n if current_PMID is not None:\n documents.append(Document(current_PMID, title, abstract,\n annotations))\n current_PMID, title, abstract = None, None, None\n annotations = []\n continue\n m = TEXT_LINE_RE.match(line)\n if m:\n PMID, tiab, text = m.groups()\n current_PMID = check_PMID(current_PMID, PMID)\n if tiab == 't':\n if title is not None:\n raise FormatError('Multiple titles for %s' % PMID)\n title = text\n elif tiab == 'a':\n if abstract is not None:\n raise FormatError('Multiple abstracts for %s' % PMID)\n abstract = text\n else:\n raise FormatError('Failed to parse line %s' % line)\n else:\n # Annotation line\n annotation = parse_annotation_line(line, ln)\n current_PMID = check_PMID(current_PMID, annotation.PMID)\n annotations.append(annotation)\n if current_PMID is not None:\n documents.append(Document(current_PMID, title, abstract, annotations))\n for d in documents:\n d.verify_annotations()\n return documents",
"def get_article(doi, output='txt'):\n xml = download_article(doi)\n if xml is None:\n return None\n et = ET.fromstring(xml)\n full_text = et.find('article:originalText', elsevier_ns)\n if full_text is None:\n logging.info('Could not find full text for %s.' % doi)\n return None\n main_body = full_text.find('xocs:doc/xocs:serial-item/ja:article/ja:body',\n elsevier_ns)\n if main_body is None:\n return None\n if output == 'xml':\n return main_body\n elif output == 'txt':\n sections = main_body.findall('common:sections/common:section',\n elsevier_ns)\n full_txt = ''\n for s in sections:\n # Paragraphs that are directly under the section\n pars = s.findall('common:para', elsevier_ns)\n # Paragraphs that are under a section within the section\n pars += s.findall('common:section/common:para', elsevier_ns)\n for p in pars:\n # Get the initial string inside the paragraph\n if p.text is not None:\n full_txt += p.text\n # When there are tags inside the paragraph (for instance\n # references), we need to take those child elements one by one\n # and get the corresponding tail strings and join these. \n full_txt += ''.join([c.tail if c.tail is not None \n else '' for c in p.getchildren()])\n full_txt += '\\n'\n else:\n logging.error('Unknown output format %s.' % output)\n return None\n return full_txt",
"def create_citation(cit):\n if cit is not None:\n if cit['citation-type'] == \"BIBTEX\":\n return pybtex.database.parse_string(cit['citation-value'], \"bibtex\")\n return None",
"def get_dois(wt):\n lines = get_wiki_lines(wt, predicate=any)\n return [doi.find_doi_in_text(l) for l in lines]",
"def check_doi(self, doi, whitespace=False):\n\n result_doi = find_doi_in_string(doi, whitespace=False)\n if result_doi is None:\n raise BadDOI('Supplied DOI \"%s\" fails doi check' % doi)\n return doi",
"def format_bib_entry(e: BibDocument):\n if e.bibtex is not None:\n b = e.bibtex\n s = fix_string(b.get('title', b.get('ID', '?'))) + '\\n'\n s += format_author(b.get('author', b.get('editor', '?'))) + ' ' + b.get('year', '')\n if len(e.filepaths) > 0:\n s += ' [PDF]'\n return s\n else:\n return e.relpath()",
"def get_metadata_from_crossref(doi_string):\n doi_types = {\n 'journal-article': {\n 'aliases': [],\n 'data_fields': {\n 'publisher': {\n 'access': 'path',\n 'path_elements': ['message', 'publisher']\n },\n 'journal_full_title': {\n 'access': 'path',\n 'path_elements': ['message', 'container-title', 0]\n },\n 'issn': {\n 'access': 'path',\n 'path_elements': ['message', 'ISSN', 0]\n },\n 'license_ref': {\n 'access': 'function',\n 'func_name': '_extract_crossref_license',\n 'additional_params': []\n },\n 'issn_print': {\n 'access': 'function',\n 'func_name': '_extract_crossref_isxn',\n 'additional_params': ['issn', 'print']\n },\n 'issn_electronic': {\n 'access': 'function',\n 'func_name': '_extract_crossref_isxn',\n 'additional_params': ['issn', 'electronic']\n }\n }\n },\n 'book': {\n 'aliases': ['monograph'],\n 'data_fields': {\n 'publisher': {\n 'access': 'path',\n 'path_elements': ['message', 'publisher']\n },\n 'book_title': {\n 'access': 'path',\n 'path_elements': ['message', 'title', 0]\n },\n 'isbn': {\n 'access': 'path',\n 'path_elements': ['message', 'ISBN', 0]\n },\n 'license_ref': {\n 'access': 'function',\n 'func_name': '_extract_crossref_license',\n 'additional_params': []\n },\n 'isbn_print': {\n 'access': 'function',\n 'func_name': '_extract_crossref_isxn',\n 'additional_params': ['isbn', 'print']\n },\n 'isbn_electronic': {\n 'access': 'function',\n 'func_name': '_extract_crossref_isxn',\n 'additional_params': ['isbn', 'electronic']\n }\n }\n }\n }\n doi = get_normalised_DOI(doi_string)\n if doi is None:\n error_msg = 'Parse Error: \"{}\" is no valid DOI'.format(doi_string)\n return {'success': False, 'error_msg': error_msg, 'exception': None}\n\n url = 'http://api.crossref.org/works/' + doi\n req = Request(url)\n req.add_header('User-Agent', USER_AGENT)\n ret_value = {'success': True}\n try:\n response = urlopen(req)\n content_string = response.read()\n data = json.loads(content_string)\n data_doi_type = data['message']['type']\n normalized_doi_type = None\n for doi_type, content in doi_types.items():\n if data_doi_type == doi_type or data_doi_type in content['aliases']:\n normalized_doi_type = doi_type\n break\n if normalized_doi_type is None:\n title = data['message']['title'][0]\n raise UnsupportedDoiTypeError(data_doi_type, doi_types, title)\n crossref_data = {'doi_type': normalized_doi_type}\n data_fields = doi_types[normalized_doi_type]['data_fields']\n for field, access_method in data_fields.items():\n if access_method['access'] == 'path':\n position = data\n for element in access_method['path_elements']:\n try:\n position = position[element]\n except (KeyError, IndexError):\n crossref_data[field] = None\n break\n else:\n crossref_data[field] = position\n elif access_method['access'] == 'function':\n function = globals()[access_method['func_name']]\n params = [data] + access_method['additional_params']\n crossref_data[field] = function(*params)\n ret_value['data'] = crossref_data\n except HTTPError as httpe:\n ret_value['success'] = False\n ret_value['error_msg'] = 'HTTPError: {} - {}'.format(httpe.code, httpe.reason)\n ret_value['exception'] = httpe\n except RemoteDisconnected as rd:\n ret_value['success'] = False\n ret_value['error_msg'] = 'Remote Disconnected: {}'.format(str(rd))\n ret_value['exception'] = rd\n except ConnectionResetError as cre:\n ret_value['success'] = False\n ret_value['error_msg'] = 'Connection Reset: {}'.format(str(cre))\n ret_value['exception'] = cre\n except URLError as urle:\n ret_value['success'] = False\n ret_value['error_msg'] = 
'URLError: {}'.format(urle.reason)\n ret_value['exception'] = urle\n except ET.ParseError as etpe:\n ret_value['success'] = False\n ret_value['error_msg'] = 'ElementTree ParseError: {}'.format(str(etpe))\n ret_value['exception'] = etpe\n except UnsupportedDoiTypeError as udte:\n ret_value['success'] = False\n ret_value['error_msg'] = str(udte)\n ret_value['exception'] = udte\n return ret_value"
] | [
"0.62905365",
"0.62419164",
"0.5902826",
"0.5854164",
"0.57459015",
"0.55268157",
"0.54975694",
"0.54722095",
"0.54414177",
"0.5378782",
"0.53721684",
"0.5337533",
"0.52963126",
"0.5257952",
"0.52536035",
"0.52379805",
"0.52209264",
"0.5174101",
"0.5173018",
"0.5162918",
"0.51382595",
"0.5069809",
"0.5068834",
"0.5053487",
"0.504068",
"0.50348777",
"0.49880543",
"0.4976845",
"0.49730757",
"0.49643588"
] | 0.74704564 | 0 |
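A hedged sketch of serializing the entry returned by get_bibtex_entry in the record above back into BibTeX text with bibtexparser; the DOI is an illustrative placeholder and get_bibtex_entry is assumed to be importable alongside its helpers.

import bibtexparser
from bibtexparser.bibdatabase import BibDatabase

entry = get_bibtex_entry("10.1371/journal.pcbi.1004668")  # hypothetical DOI
if entry is not None:
    db = BibDatabase()
    db.entries = [entry]
    print(bibtexparser.dumps(db))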
Computes the forward cone from point p. | def forward_cone(self, p):
return to_rec(zip(p, self.top)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backward_cone(self, p):\n return to_rec(zip(self.bot, p))",
"def conjgradient(x, p, gprev, gnew):\r\n gnew = np.array(gnew)[np.newaxis]\r\n gprev = np.array(gprev)[np.newaxis]\r\n gnew = gnew.T\r\n gprev = gprev.T\r\n beta = (gnew.T)@gnew/((gprev.T)@gprev)\r\n gnew = gnew.flatten()\r\n beta = beta.flatten()\r\n p = -gnew + beta*p\r\n return p",
"def projectPoint(self,p):\n a,b,c = self.a, self.b, self.c\n x,y = p\n return numpy.array( [ b*(x*b-y*a) - c*a, a*(y*a-x*b) - c*b ] )",
"def _wf(self, p):\n r = self.faces - p\n n = norm(r, axis=2)\n num = row_wise_dot(r[:, 0, :], np.cross(r[:, 1, :], r[:, 2, :]))\n den = n[:, 1] * n[:, 2] * n[:, 0]\n for i in range(3):\n j = (i + 1) % 3\n k = (i + 2) % 3\n den += row_wise_dot(r[:, i, :], r[:, j, :]) * n[:, k]\n return 2*np.arctan2(num, den)",
"def CE(self, p_true, p_model):\n return np.sum(-np.array(p_true)*np.log2(np.array(p_model)))",
"def convex_conj(self):\n return ConstantFunctional(self.domain, -self.constant)",
"def convex_conj(self):\n return KullbackLeiblerConvexConj(self.domain, self.prior)",
"def conj(self, o): \n return (o.inv()) * self * o",
"def bic(self, p=1) -> float:\n assert p >= 0\n\n n = len(self.true)\n return float(n * np.log(self.sse() / n) + p * np.log(n))",
"def ccw(p1, p2, p3):\n return (p2[0] - p1[0])*(p3[1] - p1[1]) - (p2[1] - p1[1])*(p3[0] - p1[0])",
"def conj(z):",
"def connection_point(x, f, p):\n y = collision_point(x, f, p)\n if not p(y):\n return y\n return convergent_point(x, f(y), f)",
"def convex_conj(self):\n return KullbackLeiblerCrossEntropyConvexConj(self.domain, self.prior)",
"def gradient(E: np.ndarray, p: np.ndarray) -> np.ndarray:\n\n adj = np.abs(np.sign(E))\n F_pot = np.array(adj, copy=True, dtype=float)\n F_pot[F_pot.nonzero()] = gradop(adj) * p / E[E.nonzero()] ** 2\n return F_pot",
"def cone(individual, position, height, width):\n value = 0.0\n for x, p in zip(individual, position):\n value += (x - p)**2\n return height - width * math.sqrt(value)",
"def algorithm_1_2(p, c, x):\n\n q = np.array(c, dtype=np.float64)\n\n for k in range(1, p + 1):\n for j in range(0, p - k + 1):\n q[j] = (1 - x) * q[j] + x * q[j + 1]\n return q[0]",
"def cone(self, length):\n X, Y, Z = baseFromAngles(self.angles)\n result = polyhedron([], [], [])\n assert result.pnFacesInPoly()\n f = result.addVertex(self.focalPoint)\n for s in self.segments:\n vect0 = (self.focalDist * X +\n s[0][0] * Z +\n s[0][1] * Y).normalize()\n vect1 = (self.focalDist * X +\n s[1][0] * Z +\n s[1][1] * Y).normalize()\n assert result.pnFacesInPoly()\n vert0 = result.addVertex(vector(self.focalPoint) +\n length * vect0)\n assert result.pnFacesInPoly()\n vert1 = result.addVertex(vector(self.focalPoint) +\n length * vect1)\n assert result.pnFacesInPoly()\n try:\n result.addFace([f, vert1, vert0])\n except ValueError:\n result.plot()\n p = polyhedron([], [], [])\n p.addFace([f, vert1, vert0])\n p.union(result).plot()\n self.plot()\n assert False\n assert result.pnFacesInPoly()\n # WARNING : the cone is not closed at its top.\n # I'm not sure if this can cause issues.\n return result",
"def convex_conj(self):\n return IndicatorNuclearNormUnitBall(\n self.domain,\n conj_exponent(self.outernorm.exponent),\n conj_exponent(self.pwisenorm.exponent))",
"def convex_conj(self):\n return KullbackLeiblerCrossEntropy(self.domain, self.prior)",
"def convex_conj(self):\n return KullbackLeibler(self.domain, self.prior)",
"def calculate_curvature(P):\n y = P[:,1].copy()\n x = P[:,0].copy()\n dx = np.gradient(x)\n yd = np.gradient(y, dx)\n ydd = np.gradient(yd, dx)\n return np.sum(ydd**2)",
"def convex_conj(self):\n return IndicatorLpUnitBall(self.domain,\n exponent=conj_exponent(self.exponent))",
"def convex_conj(self):\n convex_conjs = [func.convex_conj for func in self.functionals]\n return SeparableSum(*convex_conjs)",
"def gradient(self, p, action):\n grad = (p - action)/(p**2 - p)\n return grad",
"def convex_conj(self):\n return NuclearNorm(self.domain,\n conj_exponent(self.__norm.outernorm.exponent),\n conj_exponent(self.__norm.pwisenorm.exponent))",
"def convex_conj(self):\n return (1.0 / 4) * L2NormSquared(self.domain)",
"def Fwacdc(X, g, p0, back, alpha):\n Eac, Edc = X\n return p0 * Eac * g + back + alpha * (2.0 * g * Edc * Eac)",
"def convex_conj(self):\n return IndicatorZero(self.domain, -self.constant)",
"def cows(P0, t, coeff):\n P = P0\n r, K = coeff\n dP = r * (1 - P/K) * P # differential equation dP/dt\n return dP",
"def jacobian(self, p):\n delta = 1.\n props = {'density': self.density}\n xp, zp = self.x, self.z\n verts = self.verts\n x, z = p\n jac = np.transpose([\n (talwani.gz(xp, zp, [Polygon(verts + [[x + delta, z]], props)]) -\n talwani.gz(xp, zp, [Polygon(verts + [[x - delta, z]], props)])\n ) / (2. * delta),\n (talwani.gz(xp, zp, [Polygon(verts + [[x, z + delta]], props)]) -\n talwani.gz(xp, zp, [Polygon(verts + [[x, z - delta]], props)])\n ) / (2. * delta)])\n return jac"
] | [
"0.624741",
"0.62070286",
"0.5786345",
"0.57365304",
"0.5715694",
"0.56874",
"0.5583418",
"0.55606633",
"0.5546253",
"0.5532969",
"0.55318826",
"0.55309975",
"0.54770863",
"0.5470166",
"0.54118145",
"0.54030555",
"0.53950745",
"0.5390107",
"0.5386848",
"0.536962",
"0.53636897",
"0.535987",
"0.5338782",
"0.53349173",
"0.53281194",
"0.5311466",
"0.5279773",
"0.525687",
"0.52552605",
"0.52344203"
] | 0.715168 | 0 |
Computes the backward cone from point p. | def backward_cone(self, p):
return to_rec(zip(self.bot, p)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def conjgradient(x, p, gprev, gnew):\r\n gnew = np.array(gnew)[np.newaxis]\r\n gprev = np.array(gprev)[np.newaxis]\r\n gnew = gnew.T\r\n gprev = gprev.T\r\n beta = (gnew.T)@gnew/((gprev.T)@gprev)\r\n gnew = gnew.flatten()\r\n beta = beta.flatten()\r\n p = -gnew + beta*p\r\n return p",
"def _re(self, p):\n return self.edges[:, 0, :] - p # 0 is arbitrary - the other end also works",
"def forward_cone(self, p):\n return to_rec(zip(p, self.top))",
"def flip(self, p):\n return -p",
"def projectPoint(self,p):\n a,b,c = self.a, self.b, self.c\n x,y = p\n return numpy.array( [ b*(x*b-y*a) - c*a, a*(y*a-x*b) - c*b ] )",
"def counterclockwise(self, p1, p2, p3):\n return self.cross(Point(p2.x - p1.x, p2.y - p1.y), Point(p3.x - p1.x, p3.y - p1.y))",
"def gradient(E: np.ndarray, p: np.ndarray) -> np.ndarray:\n\n adj = np.abs(np.sign(E))\n F_pot = np.array(adj, copy=True, dtype=float)\n F_pot[F_pot.nonzero()] = gradop(adj) * p / E[E.nonzero()] ** 2\n return F_pot",
"def conj(z):",
"def backward(self, x_out, x_target):\r\n return 2*(x_out - x_target)",
"def backward(self):\n gradient = blah\n return gradient",
"def backward(self):\n gradient = blah\n return gradient",
"def backward(self, z):\n return self.forward(z) * (1 - self.forward(z))",
"def line_ccw(a, b, p):\n return (p[1] - a[1]) * (b[0] - a[0]) > (b[1] - a[1]) * (p[0] - a[0])",
"def ccw(p1, p2, p3):\n return (p2[0] - p1[0])*(p3[1] - p1[1]) - (p2[1] - p1[1])*(p3[0] - p1[0])",
"def backward_p(self, x):\n log_det_jacob, z = x.new_zeros(x.shape[0]), x\n for i in reversed(range(len(self.t))):\n z_ = self.mask[i] * z\n s = self.s[i](z_) * (1 - self.mask[i])\n t = self.t[i](z_) * (1 - self.mask[i])\n z = (1 - self.mask[i]) * (z - t) * torch.exp(-s) + z_\n log_det_jacob -= s.sum(dim=1)\n return z, log_det_jacob",
"def gradient(self, p, action):\n grad = (p - action)/(p**2 - p)\n return grad",
"def convex_conj(self):\n return KullbackLeibler(self.domain, self.prior)",
"def backward(self, y):\n pass",
"def backward_character():\r\n set_point(point().offset(-1))",
"def tangent(self, p):\n p = array(p, float)\n v = (p - self.o)\n v /= norm(v)\n b = self.o + ((cross(v, self.N) - v) / 3)*self.r\n mb = _mirror_(self.o, p, b) \n mbb = mb - b\n return mbb/norm(mbb)",
"def depolarising_channel(p):\n return qt.kraus_to_super([\n np.sqrt(1 - 3.*p/4.) * qt.qeye(2),\n np.sqrt(p/4.) * qt.sigmax(),\n np.sqrt(p/4.) * qt.sigmay(),\n np.sqrt(p/4.) * qt.sigmaz()\n ])",
"def backppg_ce(self,x,y):\n activation = x\n activations = [x]\n zs = []\n #feed forward\n for w,b in zip(self.weights,self.biases):\n z = np.dot(w, activation)+b\n zs.append(z)\n activation = sigmod(z)\n activations.append(activation)\n #back propagation\n delta_w = [np.zeros(w.shape) for w in self.weights]\n delta_b = [np.zeros(b.shape) for b in self.biases]\n delta = activations[-1]-y\n delta_w[-1] = np.dot(delta, activations[-2].transpose())\n delta_b[-1] = delta\n for j in xrange(2, self.numlayers):\n delta = np.dot(self.weights[-j+1].transpose(), delta)*sigmod_deri(zs[-j])\n delta_b[-j] = delta\n delta_w[-j] = np.dot(delta, activations[-j-1].transpose())\n return (delta_b, delta_w)",
"def _get_grad_direction_cost(self, p, q):\n dp = (self.grad_y[p[0]][p[1]], -self.grad_x[p[0]][p[1]])\n dq = (self.grad_y[q[0]][q[1]], -self.grad_x[q[0]][q[1]])\n \n l = np.array([q[0]-p[0], q[1]-p[1]], np.float)\n if 0 not in l:\n l *= SQRT_0_5\n \n dp_l = np.dot(dp, l)\n l_dq = np.dot(l, dq)\n if dp_l < 0:\n dp_l = -dp_l\n l_dq = -l_dq\n \n # 2/3pi * ...\n return 0.212206590789 * (np.arccos(dp_l)+np.arccos(l_dq))",
"def backward(self, top, propagate_down, bottom):\n pass",
"def backward(self, top, propagate_down, bottom):\n pass",
"def backward(self, top, propagate_down, bottom):\n pass",
"def _backward(self, w):\n # Gradient sign depends on input label (0/1)\n if w is not None:\n return w[0] * -self.w + w[1] * self.w\n else:\n raise ValueError(\"w cannot be set as None.\")",
"def backward(eph, epdlogp):\n dW2 = np.dot(eph.T, epdlogp).ravel() # which means\n dh = np.outer(epdlogp, model['W2'])\n dh[eph<0] = 0\n dW1 = np.dot(dh.T, epx)\n\n return {'W1':dW1, 'W2':dW2}",
"def jacobian(self, p):\n delta = 1.\n props = {'density': self.density}\n xp, zp = self.x, self.z\n verts = self.verts\n x, z = p\n jac = np.transpose([\n (talwani.gz(xp, zp, [Polygon(verts + [[x + delta, z]], props)]) -\n talwani.gz(xp, zp, [Polygon(verts + [[x - delta, z]], props)])\n ) / (2. * delta),\n (talwani.gz(xp, zp, [Polygon(verts + [[x, z + delta]], props)]) -\n talwani.gz(xp, zp, [Polygon(verts + [[x, z - delta]], props)])\n ) / (2. * delta)])\n return jac",
"def inverse_el(el: Fp, p: int) -> Fp:\n return pow(int(el), p-2, p)"
] | [
"0.6597751",
"0.633653",
"0.6120033",
"0.59766096",
"0.5962403",
"0.56558436",
"0.5647515",
"0.56286216",
"0.5613398",
"0.56100065",
"0.56100065",
"0.5609343",
"0.55576277",
"0.5546407",
"0.5496917",
"0.54839945",
"0.5470489",
"0.5455691",
"0.54530066",
"0.54523903",
"0.5429198",
"0.5406027",
"0.5404209",
"0.5393396",
"0.5393396",
"0.5393396",
"0.5383679",
"0.53782594",
"0.53696144",
"0.5354494"
] | 0.7388151 | 0 |
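The forward_cone and backward_cone rows above come from the same rectangle abstraction but reference to_rec, self.top, and self.bot without defining them. Below is a minimal, self-contained sketch of how such a pair could fit together; every name other than forward_cone and backward_cone (Interval, Rec, to_rec) is a hypothetical stand-in, not the original library.

```python
from collections import namedtuple

# Hypothetical stand-ins for the undefined names in the rows above.
Interval = namedtuple("Interval", ["bot", "top"])


def to_rec(intervals):
    # Build a rectangle as a tuple of per-axis (bot, top) intervals.
    return tuple(Interval(lo, hi) for lo, hi in intervals)


class Rec:
    def __init__(self, bot, top):
        self.bot = tuple(bot)  # lower corner, one value per axis
        self.top = tuple(top)  # upper corner, one value per axis

    def forward_cone(self, p):
        # Region between p and the rectangle's upper corner.
        return to_rec(zip(p, self.top))

    def backward_cone(self, p):
        # Region between the rectangle's lower corner and p.
        return to_rec(zip(self.bot, p))


r = Rec(bot=(0, 0), top=(10, 10))
print(r.forward_cone((3, 4)))   # (Interval(bot=3, top=10), Interval(bot=4, top=10))
print(r.backward_cone((3, 4)))  # (Interval(bot=0, top=3), Interval(bot=0, top=4))
```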
Ensures label commands fail with too few arguments. | def testTooFewArgumentsFails(self):
invocations_missing_args = (
# Neither arguments nor subcommand.
['label'],
# Not enough arguments for 'set'.
['label', 'set'],
['label', 'set', 'filename'],
# Not enough arguments for 'get'.
['label', 'get'],
# Not enough arguments for 'ch'.
['label', 'ch'],
['label', 'ch', '-l', 'key:val'])
for arg_list in invocations_missing_args:
stderr = self.RunGsUtil(arg_list, return_stderr=True, expected_status=1)
self.assertIn('command requires at least', stderr)
# Invoking 'ch' without any changes gives a slightly different message.
stderr = self.RunGsUtil(
['label', 'ch', 'gs://some-nonexistent-foobar-bucket-name'],
return_stderr=True,
expected_status=1)
self.assertIn('Please specify at least one label change', stderr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def invalid_args(event):\n\n s.sendReply(\n event,\n f'Please provide the proper arguments. Use \"@{s.BOT_NAME} help\" for help.',\n )",
"def test_is_valid_label_key_invalid_input():\n # test length violations\n assert not is_valid_label_key(key=None) # Too short\n assert not is_valid_label_key(key=\"\") # Too short\n assert not is_valid_label_key(key=f\"{'p' * 254}/n\") # prefix too long\n assert not is_valid_label_key(key=\"/n\") # prefix too short\n assert not is_valid_label_key(key=\"p/\") # name too short\n assert not is_valid_label_key(key=\"a\" * 254) # name too long\n assert not is_valid_label_key(key=f\"d/{'b'*64}\") # name too long\n # test first character violations (not alphanum)\n assert not is_valid_label_key(key=\"-a\")\n assert not is_valid_label_key(key=\".b\")\n assert not is_valid_label_key(key=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_key(key=\"a-\")\n assert not is_valid_label_key(key=\"b.\")\n assert not is_valid_label_key(key=\"c \")\n assert not is_valid_label_key(key=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_key(key=\"a$$a\")\n assert not is_valid_label_key(key=\"b b\")",
"def test_stratis_bad_subcommand(self):\n for command_line in [\n [\"notasub\"],\n [\"daemon\", \"notasub\"],\n [\"pool\", \"notasub\"],\n [\"blockdev\", \"notasub\"],\n [\"filesystem\", \"notasub\"],\n ]:\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def bad_args(args):\n PARSER.print_help()\n exit(0)",
"def check_num_arguments(self):\n if len(self.args) != 2:\n self.cli_parser.error(\"Please provide paths to an \"\n \"interactions file and an annotations file.\")",
"def __check_arg_count(self):\n if len(self.args) > 6 or len(self.args) == 0:\n raise ArgError(\"Unsupported amount of arguments! (\" + str(len(self.args)) + \")\")",
"def _is_label(self, words):\n if words[0] == 'label':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_LABEL command.\".format(self._file_line))\n return True\n else:\n return False",
"def check_number_of_labels(n_labels, n_samples):\n if not 1 < n_labels < n_samples:\n raise ValueError(\"Number of labels is %d. Valid values are 2 \"\n \"to n_samples - 1 (inclusive)\" % n_labels)",
"def check_number_of_labels(n_labels, n_samples):\n if not 1 < n_labels < n_samples:\n raise ValueError(\"Number of labels is %d. Valid values are 2 \"\n \"to n_samples - 1 (inclusive)\" % n_labels)",
"def testTooFewArgumentsFails(self):\n # No arguments for get, but valid subcommand.\n stderr = self.RunGsUtil(self._get_web_cmd,\n return_stderr=True,\n expected_status=1)\n self.assertIn('command requires at least', stderr)\n\n # No arguments for set, but valid subcommand.\n stderr = self.RunGsUtil(self._set_web_cmd,\n return_stderr=True,\n expected_status=1)\n self.assertIn('command requires at least', stderr)\n\n # Neither arguments nor subcommand.\n stderr = self.RunGsUtil(['web'], return_stderr=True, expected_status=1)\n self.assertIn('command requires at least', stderr)",
"def cmd_needs_no_arg(self):\n self.respond(\"501 Syntax error: command does not accept arguments.\")",
"def validate_args(args) -> None:\n if args.input_file is not None:\n assert args.num_entries_per_input_and_label is not None, \"If 'input_file' is set, 'num_entries_per_input_and_label' must be set\"\n assert args.num_entries_per_label is None, \"If 'input_file' is set, 'num_entries_per_label' must not be set\"\n assert args.batch_size is None, \"If 'input_file' is set, batch_size must not be set as 'num_entries_per_input_and_label' also \" \\\n \"serves as batch size in this case\"\n else:\n assert args.num_entries_per_input_and_label is None, \"If 'input_file' is not set, 'num_entries_per_input_and_label' must not be set\"\n assert args.num_entries_per_label is not None, \"If 'input_file' is not set, 'num_entries_per_label' must be set\"\n assert args.batch_size is not None, \"If 'input_file' is not set, 'batch_size' must be set\"",
"def validate_args(args):\n\n if args.batch_size % args.batch_splits != 0:\n raise ValueError(BATCH_SIZE_SPLIT_ERR.format(args.batch_size, args.batch_splits))\n\n if args.data_parallel and args.model_parallel:\n raise ValueError(DATA_AND_MODEL_PARALLEL_ERR)\n\n if args.class_bal and args.year_weighted_class_bal:\n raise ValueError(CONFLICTING_WEIGHTED_SAMPLING_ERR)\n\n assert args.ten_fold_test_index in range(-1, 10)",
"def raise_not_enough_arguments(self, string):\n\n\t\trequested = errors.number(self.counter + 1)\n\n\t\tnumber = len(self.positional)\n\n\t\tverb = \"was\" if number == 1 else \"were\"\n\n\t\twhat = \"Requested {} formatting argument for \"\\\n\t\t\t \"'{}' but only {} {} supplied!\"\n\n\t\twhat = what.format(requested, string, number, verb)\n\n\t\traise errors.ArgumentError(what)",
"def test_bad_command1(self):\n with self.assertRaises(ValueError):\n command = Command('Fake Command1')",
"def test_invalidargs(clickrunner):\n for args in maincli.invalid_args:\n result = clickrunner.invoke(maincli.entrypoint, args)\n assert result.exit_code == 2\n assert \"no such option\" in result.output",
"def test_arg_env_invalid(self, dfparser, instruction, label):\n dfparser.lines = [\"FROM fedora\\n\",\n \"{0} v=v\\n\".format(instruction),\n \"LABEL TEST={0}\\n\".format(label)]\n try:\n dfparser.labels['TEST']\n except KeyError:\n pass",
"def test_label_not_in_config(self):\n with self.assertRaisesRegex(\n ValueError, 'The config \\'Label\\' field should contain the positive'\n ' class label.'):\n self.ci.run_with_metadata(\n indexed_inputs=self.dataset.indexed_examples,\n model=self.model,\n dataset=self.dataset,\n )",
"def validate_arguments(args):\n if not os.path.exists(args.training_dir):\n return \"{0} is not a valid directory\".format(args.training_dir)\n\n if not os.path.exists(args.test_dir):\n return \"{0} is not a valid directory\".format(args.test_dir)\n\n if not os.path.isfile(args.label_file):\n return \"{0} is not a valid file\".format(args.label_file)\n return None",
"def check_sanity(self):\n # ensure numeric labels\n try:\n list(map(int, flatten(self.labels[:1])))\n except ValueError as ve:\n error(\"Non-numeric label encountered: {}\".format(ve))\n except TypeError as ve:\n warning(\"Non-collection labelitem encountered: {}\".format(ve))",
"def validate_args(args):\n command = args[0]\n args_length = len(args) - 1\n return VALID_COMMANDS[command] == args_length",
"def checkLabel(label):\n\n label = str(label)\n if not label:\n raise ValueError('label cannot be empty string')\n\n label = str(label)\n\n if not label:\n raise ValueError('label cannot be empty string')\n\n if not label[0].isalpha():\n raise ValueError('label must start with a letter')\n\n if not (''.join(label.split('_'))).isalnum():\n raise ValueError('label may contain alphanumeric characters and '\n 'underscore, {0} is not valid'.format(label))\n\n if isReserved(label):\n raise ValueError('{0} is a reserved word and cannot be used '\n 'as a label'.format(repr(label)))\n\n if label in READONLY:\n raise AttributeError('{0} is read-only'.format(label))\n\n return label",
"def command_error(fmt, *args, **kwargs):\n raise CommandError(fmt.format(*args, **kwargs))",
"def test_invalidValues(self):\n argV = \"--fooint egg\".split()\n self.assertRaises(usage.UsageError, self.usage.parseOptions, argV)",
"def check_invalid_args_general(config):\n # Not mathematically correct, but might be required if prior is not\n # appropriate.\n if hasattr(config, 'kl_scale') and config.kl_scale != 1.0:\n warnings.warn('Prior matching term will be scaled by %f.'\n % config.kl_scale)\n\n if hasattr(config, 'store_final_model') and \\\n hasattr(config, 'train_from_scratch') and \\\n config.store_final_model and config.train_from_scratch:\n warnings.warn('Note, when training from scratch, the final model is ' +\n 'only trained on the last task!')",
"def test_enforcement_mode_update_command_when_blank_arguments_provided(\n err_msg, args, mock_client\n):\n with pytest.raises(ValueError) as err:\n update_enforcement_mode_command(mock_client, args)\n assert str(err.value) == err_msg",
"def test_unrecognized_flag():\n parser = CmdParser([noArgs, onearg])\n with pytest.raises(CmdParseError):\n out = parser.parse(\"onearg -a -b\")",
"def check_args():\n assert os.path.exists(FLAGS.datadir)\n assert os.path.exists(FLAGS.trainlogdir)\n assert os.path.exists(FLAGS.split_fn)\n assert os.path.exists(FLAGS.labels_fname)\n assert FLAGS.snippet_len >= 1\n assert FLAGS.frameskip >= 1\n assert FLAGS.test_stride == 1 or FLAGS.test_stride == FLAGS.snippet_len, \\\n 'test_stride has to be either 1 or snippet_len (for vanilla+)'\n pass",
"def __check_errors(self):\n if not(\"input\" in self.passedArgs or \"source\" in self.passedArgs):\n raise ArgError(\"Program did not receive any of mandatory arguments! (--source=file, --input=file)\")",
"def check_num_arguments(self):\n if len(self.args) != 3:\n self.cli_parser.error(\n \"Please provide paths to an interactions file, \"\n \"an annotations file, and an expressions file.\"\n )"
] | [
"0.6378849",
"0.617358",
"0.61700785",
"0.6165488",
"0.6146504",
"0.61405295",
"0.6113329",
"0.60842806",
"0.60842806",
"0.60597503",
"0.60424715",
"0.6028631",
"0.6018959",
"0.60188454",
"0.6001575",
"0.59583277",
"0.593172",
"0.58977133",
"0.58879304",
"0.58873475",
"0.5849547",
"0.5842409",
"0.5831417",
"0.58290553",
"0.58226854",
"0.5818565",
"0.58078",
"0.58073956",
"0.5803887",
"0.57977605"
] | 0.7460278 | 0 |
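The test row above drives gsutil through a project-specific RunGsUtil helper; the underlying pattern is simply "invoke the CLI with too few arguments, then assert on the exit status and the stderr message". A generic sketch of that pattern follows; the command name mycli and its error text are placeholders, not gsutil's real interface.

```python
import subprocess

# "mycli" and its error message are hypothetical placeholders.
def run_cli(args):
    proc = subprocess.run(["mycli", *args], capture_output=True, text=True)
    return proc.returncode, proc.stderr


def test_too_few_arguments():
    for args in (["label"], ["label", "set"], ["label", "get"]):
        code, stderr = run_cli(args)
        assert code != 0, f"{args} should fail"
        assert "requires at least" in stderr
```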
Dynamically generates an SQL update query, eliminating the columns whose value is None | def sql_filtered_update(table, set_columns, where_columns, values):
for index in range(len(set_columns) - 1, -1, -1):
if values[index] is None:
del set_columns[index]
del values[index]
set_columns = [col + ' = ?' for col in set_columns]
columns_to_set = ', '.join(set_columns)
where_columns = [col + ' = ?' for col in where_columns]
where_condition = ' AND '.join(where_columns)
query = f'UPDATE {table} SET {columns_to_set} WHERE {where_condition}'
return query, values | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_scrub_sql():\r\n # it seems incredibly hard to get SQLAlchemy to emit a fully-compiled SQL\r\n # string that including data values. i gave up after trying this method with\r\n # the \"dialect\" sqlalchemy.dialects.mysql.mysqldb.MySQLDialect()\r\n # https://sqlalchemy.readthedocs.org/en/latest/faq/sqlexpressions.html\r\n # #how-do-i-render-sql-expressions-as-strings-possibly-with-bound\r\n # -parameters-inlined\r\n sql_format = (\"update %(table)s set %(col)s = %(sub_value)s \"\r\n \"where %(col)s is not null;\")\r\n return '\\n'.join(\r\n sql_format % dict(table=c.table.name, col=c.name, sub_value=v)\r\n for c, v in get_scrub_columns().iteritems())",
"def getSQL_update_ex(table, dict):\n kvs = ''\n kvs_where = ''\n for k, v in dict.items():\n if k.startswith('where'):\n kvs_where += k[5:] + '='\n if isNumber(v) or v == 'null':\n kvs_where += str(v) + ' and '\n else:\n kvs_where += \"'\" + v + \"' and \"\n else:\n if not v:\n continue\n if isNumber(v) or v == 'null':\n kvs += k + '=' + str(v) + ','\n else:\n kvs += k + \"='\" + v + \"',\"\n\n if kvs_where == '':\n return 'UPDATE %s SET %s' % (table, kvs[:-1])\n return 'UPDATE %s SET %s WHERE %s' % (table, kvs[:-1], kvs_where[:-4])",
"def getSQL_update(table, **kwargs):\n kvs = ''\n kvs_where = ''\n for k, v in kwargs.items():\n if k.startswith('where'):\n kvs_where += k[5:] + '='\n if isNumber(v) or v == 'null':\n kvs_where += str(v) + ' and '\n else:\n kvs_where += \"'\" + v + \"' and \"\n else:\n if not v:\n continue\n if isNumber(v) or v == 'null':\n kvs += k + '=' + str(v) + ','\n else:\n kvs += k + \"='\" + v + \"',\"\n\n if kvs_where == '':\n return 'UPDATE %s SET %s' % (table, kvs[:-1])\n return 'UPDATE %s SET %s WHERE %s' % (table, kvs[:-1], kvs_where[:-4])",
"def test_update_from_none(self):\r\n ctx = {}\r\n col = columns.Set(columns.Integer, db_field=\"TEST\")\r\n statements = col.get_update_statement({1, 2, 3, 4}, None, ctx)\r\n\r\n #only one variable /statement should be generated\r\n assert len(ctx) == 1\r\n assert len(statements) == 1\r\n\r\n assert ctx.values()[0].value == {1, 2, 3, 4}\r\n assert statements[0] == '\"TEST\" = :{}'.format(ctx.keys()[0])",
"def test_update_from_none(self):\r\n ctx = {}\r\n col = columns.List(columns.Integer, db_field=\"TEST\")\r\n statements = col.get_update_statement([1, 2, 3], None, ctx)\r\n\r\n #only one variable /statement should be generated\r\n assert len(ctx) == 1\r\n assert len(statements) == 1\r\n\r\n assert ctx.values()[0].value == [1, 2, 3]\r\n assert statements[0] == '\"TEST\" = :{}'.format(ctx.keys()[0])",
"def update_records(cursor,table_schema,table_name,column_name,value):\n update_records = \"UPDATE \" + table_schema + \".\" + table_name + \" SET \" + column_name + \"='\" + value + \"' WHERE COALESCE(\" + column_name + \",'')='';\"\n cursor.execute(update_records)",
"def create_update_code(name_of_table, col_names=[None], col_values=[None], where_col = None, where_equals_value = None):\n sql_str = ''\n try:\n if col_names is None:\n raise Exception('You must provide at least one column name!')\n elif col_values is None:\n raise Exception('You must provide at least one column value!')\n elif len(col_names) != len(col_values):\n raise Exception('You must provide one value for each column')\n elif where_col is None or where_equals_value is None:\n raise Exception('You must provide a where column and an equals value')\n else:\n sql_str = 'UPDATE ' + str(name_of_table).strip() + '\\nSET\\n\\t'\n counter = 0\n while counter < len(col_names):\n sql_str += str(col_names[counter]).strip() \\\n + ' = ' + str(col_values[counter]).strip() + ', \\n\\t'\n counter += 1\n sql_str = (sql_str.strip())[0:-1] + '' # Strip off the last comma\n sql_str += '\\nWHERE ' + where_col + \" = \" + where_equals_value\n except Exception as e:\n raise Exception('Error in create_update_code(): ' + e.__str__())\n return sql_str",
"def sql(self):\n\n if not self._table_names:\n raise ValueError('UPDATE requires at least one table')\n if not self._values and not self._values_raw:\n raise ValueError('UPDATE requires at least one value')\n\n table_refs = [', '.join(self._table_names)]\n param_values = []\n col_names = []\n inline_values = []\n set_values = []\n\n self._append_join_table_refs(self._table_names[0], table_refs)\n\n if self._values:\n for col, val in self._values.iteritems():\n col_names.append(col)\n self._parameterize_values(val, inline_values, param_values)\n\n for col in self._values_raw:\n val, val_params = self._values_raw[col]\n col_names.append(col)\n inline_values.append(val)\n if val_params is not None and self.placeholder:\n param_values.extend(val_params)\n\n assert len(col_names) == len(inline_values)\n for col, val in zip(col_names, inline_values):\n set_values.append(u'{0}={1}'.format(self.quote_col_ref(col), val))\n\n # MySQL UPDATE syntax as of 5.7:\n #\n # Single-table syntax:\n #\n # UPDATE [LOW_PRIORITY] [IGNORE] table_reference\n # SET col_name1={expr1|DEFAULT} [, col_name2={expr2|DEFAULT}] ...\n # [WHERE where_condition]\n # [ORDER BY ...]\n # [LIMIT row_count]\n #\n # Multiple-table syntax:\n #\n # UPDATE [LOW_PRIORITY] [IGNORE] table_references\n # SET col_name1={expr1|DEFAULT} [, col_name2={expr2|DEFAULT}] ...\n # [WHERE where_condition]\n\n sql = ['UPDATE']\n\n if self.query_options:\n sql.extend(self.query_options)\n\n if self.ignore_error:\n sql.append('IGNORE')\n\n sql.append(' '.join(table_refs))\n\n sql.append('SET')\n sql.append(', '.join(set_values))\n\n if self._where_cond_root.has_conds:\n sql.append('WHERE')\n sql.append(self._where_cond_root.sql(param_values))\n\n if self._orderby_conds:\n if len(self._table_names) + len(self._join_refs) > 1:\n raise ValueError('Multiple-table UPDATE does not support ORDER BY')\n\n sql.append('ORDER BY')\n sql.append(', '.join(self._orderby_conds))\n\n if self._limit:\n if len(self._table_names) + len(self._join_refs) > 1:\n raise ValueError('Multiple-table UPDATE does not support LIMIT')\n\n sql.append('LIMIT {0}'.format(self._limit))\n\n if self.placeholder:\n return ' '.join(sql), param_values if param_values else None\n assert not param_values\n return ' '.join(sql)",
"def test_update_from_empty(self):\r\n ctx = {}\r\n col = columns.Set(columns.Integer, db_field=\"TEST\")\r\n statements = col.get_update_statement({1, 2, 3, 4}, set(), ctx)\r\n\r\n #only one variable /statement should be generated\r\n assert len(ctx) == 1\r\n assert len(statements) == 1\r\n\r\n assert ctx.values()[0].value == {1, 2, 3, 4}\r\n assert statements[0] == '\"TEST\" = :{}'.format(ctx.keys()[0])",
"def test_update_from_empty(self):\r\n ctx = {}\r\n col = columns.List(columns.Integer, db_field=\"TEST\")\r\n statements = col.get_update_statement([1, 2, 3], [], ctx)\r\n\r\n #only one variable /statement should be generated\r\n assert len(ctx) == 1\r\n assert len(statements) == 1\r\n\r\n assert ctx.values()[0].value == [1, 2, 3]\r\n assert statements[0] == '\"TEST\" = :{}'.format(ctx.keys()[0])",
"def generate_update_sql(self, fieldupdate, condition):\n return \"UPDATE %s SET %s WHERE %s\" % (self.tablename, fieldupdate, condition)",
"def for_update_clause(self, select):\n return ''",
"def sql_filtered_insert(table, set_columns, values):\n for index in range(len(set_columns) - 1, -1, -1):\n if values[index] is None:\n del set_columns[index]\n del values[index]\n values_fields = ['?'] * len(set_columns)\n query_columns = ', '.join(set_columns)\n values_fields = ', '.join(values_fields)\n query = f'INSERT INTO {table} ({query_columns}) VALUES ({values_fields})'\n return query, values",
"def update(self, **values):\r\n if not values:\r\n return\r\n\r\n nulled_columns = set()\r\n us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, timestamp=self._timestamp)\r\n for name, val in values.items():\r\n col_name, col_op = self._parse_filter_arg(name)\r\n col = self.model._columns.get(col_name)\r\n # check for nonexistant columns\r\n if col is None:\r\n raise ValidationError(\"{}.{} has no column named: {}\".format(self.__module__, self.model.__name__, col_name))\r\n # check for primary key update attempts\r\n if col.is_primary_key:\r\n raise ValidationError(\"Cannot apply update to primary key '{}' for {}.{}\".format(col_name, self.__module__, self.model.__name__))\r\n\r\n val = col.validate(val)\r\n if val is None:\r\n nulled_columns.add(col_name)\r\n continue\r\n\r\n # add the update statements\r\n if isinstance(col, Counter):\r\n # TODO: implement counter updates\r\n raise NotImplementedError\r\n elif isinstance(col, (List, Set, Map)):\r\n if isinstance(col, List):\r\n klass = ListUpdateClause\r\n elif isinstance(col, Set):\r\n klass = SetUpdateClause\r\n elif isinstance(col, Map):\r\n klass = MapUpdateClause\r\n else:\r\n raise RuntimeError\r\n us.add_assignment_clause(klass(col_name, col.to_database(val), operation=col_op))\r\n else:\r\n us.add_assignment_clause(AssignmentClause(\r\n col_name, col.to_database(val)))\r\n\r\n if us.assignments:\r\n self._execute(us)\r\n\r\n if nulled_columns:\r\n ds = DeleteStatement(self.column_family_name, fields=nulled_columns, where=self._where)\r\n self._execute(ds)",
"def _delete_null_columns(self):\r\n ds = DeleteStatement(self.column_family_name)\r\n deleted_fields = False\r\n for _, v in self.instance._values.items():\r\n col = v.column\r\n if v.deleted:\r\n ds.add_field(col.db_field_name)\r\n deleted_fields = True\r\n elif isinstance(col, Map):\r\n uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value)\r\n if uc.get_context_size() > 0:\r\n ds.add_field(uc)\r\n deleted_fields = True\r\n\r\n if deleted_fields:\r\n for name, col in self.model._primary_keys.items():\r\n ds.add_where_clause(WhereClause(\r\n col.db_field_name,\r\n EqualsOperator(),\r\n col.to_database(getattr(self.instance, name))\r\n ))\r\n self._execute(ds)",
"def build_upsert_query(engine, table_name, src_dict, do_update=False):\n def _for_pgsql(value, dtype):\n \"\"\"\n Converts a python datatype to the appropriate string (including, e.g., \\\n the necessary single quotes and/or brackets ) for use in a raw \\\n postgresql query.\n Args:\n value: (various datatypes) the value in question\n dtype: str, the datatype\n Returns:\n str, with the necessary formatting\n \"\"\"\n if dtype.startswith(('int', 'float', 'double', 'numeric')):\n if value is None:\n return \"Null\"\n elif str(value).lower() == 'nan':\n return \"'nan'\"\n elif dtype.endswith('[]'):\n value = ', '.join([str(v) for v in value])\n value = \"'{\" + value + \"}'\"\n return value\n else:\n return str(value)\n elif dtype.startswith('time'):\n if value is None:\n return \"Null\"\n else:\n return \"'\" + str(value) + \"'\"\n elif dtype.startswith('bool'):\n if value is None:\n raise ValueError(\"Error: bool should not be None.\")\n else:\n if str(value).startswith(('t', 'T')):\n return str(True)\n else:\n return str(False)\n elif dtype.startswith('json'):\n # In this case, value itself should be a dict\n value = ','.join(['\"{k}\":\"{v}\"'.format(k=k, v=v)\n for k, v in value.items()])\n value = \"'{\" + value + \"}'\"\n return value\n elif dtype == 'text[]':\n value = ', '.join(['\"' + str(v) + '\"' for v in value])\n value = \"'{\" + str(value) + \"}'\"\n return value\n else:\n if str(value).startswith('$delim$') and\\\n str(value).endswith('$delim$'):\n return str(value)\n if '::' in str(value):\n value = str(value).split(\"::\")[0].strip(\"'\")\n return \"'\" + str(value) + \"'\"\n\n def _get_values(uprow, types):\n \"\"\"\n Gets a list of values for use in a raw SQL query, e.g.,\n\n INSERT INTO table_name\n (column1, column2, ...)\n VALUES\n (value1, value2, ...);\n\n This function returns a string \"value1, value2, ...\"\n Args:\n uprow: dict, containing the values\n types: dict, containing the data types of the values\n Return:\n str, containing the values as described above.\n \"\"\"\n tmp_uprow = {k: _for_pgsql(v, types[k]) for k, v in uprow.items()}\n mappable = \",\".join([\"{\" + str(k) + \"}\" for k in uprow.keys()])\n values = mappable.format(**tmp_uprow)\n return values\n\n def _get_set_pairs(uprow, types):\n \"\"\"\n Gets a list of \"set pairs\" for use in a raw SQL query, e.g.,\n\n INSERT INTO table_name\n (column1, column2, ...)\n VALUES\n (value1, value2, ...)\n ON CONFLOCT (column1) DO UPDATE\n SET\n column1=value1,\n column2=value2\n\n This function returns a string \"column1=value1, column=value2\n Args:\n uprow: dict, containing the values\n types: dict, containing the data types of the values\n Return:\n str, containing the \"set pairs\" as described above.\n \"\"\"\n pairs = []\n for key, val in uprow.items():\n pairs.append(\"{0}={1}\".format(key, _for_pgsql(val, types[key])))\n return \", \".join(pairs)\n\n # Mirror table from DB\n meta = sqla.MetaData(bind=engine)\n insp = sqla.inspect(engine)\n table = sqla.Table(table_name, meta, autoload=True, autoload_with=engine)\n table_cols = [str(col).split('.')[1] for col in table.columns]\n\n # Collect dict entries that also appear in the table as a \"row\"\n uprow = {key: src_dict[key] for key in src_dict if key in table_cols}\n\n # Load defaults and collect types\n types = {}\n for column in insp.get_columns(table_name, default=True):\n name = column['name']\n if (name not in uprow) and (name != 'id'):\n uprow[name] = column['default']\n types[name] = str(column['type']).lower()\n\n # Build base query\n columns 
= \"{keylist}\".format(keylist=', '.join(uprow.keys()))\n values = _get_values(uprow, types)\n\n query = \"INSERT INTO {table_name}\\n\".format(table_name=table_name)\n query += \"({columns})\\n\".format(columns=columns)\n query += \"VALUES\\n\"\n query += \"({values})\\n\".format(values=values)\n\n # Fetch unique columns\n unique_constraints = insp.get_unique_constraints(table_name)\n\n # Handle potential conflicts\n if len(unique_constraints) > 0:\n unique_cols = insp.get_unique_constraints(table_name)[\n 0]['column_names']\n if len(unique_cols) > 1:\n unique_cols = \", \".join([str(col) for col in list(unique_cols)])\n else:\n unique_cols = ', '.join(unique_cols)\n if do_update:\n set_clause = \"ON CONFLICT ({unique_cols}) DO UPDATE SET\\n\".\\\n format(unique_cols=unique_cols)\n set_clause += _get_set_pairs(uprow, types)\n query += set_clause\n else:\n query += \"ON CONFLICT ({unique_cols}) DO NOTHING\\n\".\\\n format(unique_cols=unique_cols)\n else:\n # No unique constraints, look for primary key instead\n primary_key = [c for c in table.columns if c.primary_key]\n if len(primary_key) == 1:\n primary_key, = primary_key\n # Ditch reference to foreign table\n if '.' in str(primary_key):\n primary_key = str(primary_key).split('.')[-1]\n else:\n tmp = []\n for col in primary_key:\n # Ditch reference to foreign table\n if '.' in str(col):\n col = str(col).split('.')[-1]\n tmp.append(col)\n primary_key = \", \".join(tmp)\n if do_update:\n set_clause = \"ON CONFLICT ({primary_key}) DO UPDATE SET\\n\".\\\n format(primary_key=primary_key)\n set_clause += _get_set_pairs(uprow, types)\n query += set_clause\n else:\n query += \"ON CONFLICT ({primary_key}) DO NOTHING\\n\".\\\n format(primary_key=primary_key)\n\n query += ';'\n\n return query",
"def db_update(table: str, columns: list, values: list, where: str):\n\n db, c = start_db()\n\n vals = ', '.join([f'{c}=\"{v}\"' for c, v in zip(columns, values)])\n\n if where:\n query = f'UPDATE {table} SET {vals} WHERE {where}'\n else:\n query = f'UPDATE {table} SET {vals}'\n\n c.execute(query)\n db.commit()\n db.close()",
"def test_null_update_deletes_column(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, None if i == 3 else str(i))",
"def default_missing_values(cur, conn):\n for query in default_values_queries:\n cur.execute(query)\n conn.commit()",
"def test_partial_update_creation(self):\r\n ctx = {}\r\n col = columns.Set(columns.Integer, db_field=\"TEST\")\r\n statements = col.get_update_statement({1, 2, 3, 4}, {2, 3, 4, 5}, ctx)\r\n\r\n assert len([v for v in ctx.values() if {1} == v.value]) == 1\r\n assert len([v for v in ctx.values() if {5} == v.value]) == 1\r\n assert len([s for s in statements if '\"TEST\" = \"TEST\" -' in s]) == 1\r\n assert len([s for s in statements if '\"TEST\" = \"TEST\" +' in s]) == 1",
"def _raw_sql(self, values):\n if isinstance(self.model._meta.pk, CharField):\n when_clauses = ' '.join([self._when(\"'{}'\".format(x), y) for (x, y) in values])\n else:\n when_clauses = ' '.join([self._when(x, y) for (x, y) in values])\n table_name = self.model._meta.db_table\n primary_key = self.model._meta.pk.column\n return 'SELECT CASE {}.\"{}\" {} ELSE 0 END'.format(table_name, primary_key, when_clauses)",
"def put(self,colname,value,**kwargs):\n arguments = {'where' : \"String e.g 'chainID = 'A''\",\n 'index' : \"Array e.g. [27,28,30]\",\n 'name' : \"'CA' atome name\",\n 'query' : \"SQL query e.g. 'WHERE chainID='B' AND resName='ASP' \"}\n\n # the asked keys\n keys = kwargs.keys()\n\n # if we have more than one key we kill it\n if len(keys)>1 :\n print('You can only specify 1 conditional statement for the pdb2sql.put function')\n return\n\n # check if the column exists\n try:\n self.c.execute(\"SELECT EXISTS(SELECT {an} FROM ATOM)\".format(an=colname))\n except:\n print('Error column %s not found in the database' %colname)\n self.get_colnames()\n return\n\n\n # if we have 0 key we take the entire db\n if len(kwargs) == 0:\n query = 'UPDATE ATOM SET {cn}=?'.format(cn=colname)\n value = tuple([value])\n self.c.execute(query,value)\n return\n\n # otherwise we have only one key\n key = list(keys)[0]\n cond = kwargs[key]\n\n # select which key we have\n if key == 'where':\n query = 'UPDATE ATOM SET {cn}=? WHERE {cond}'.format(cn=colname,cond=cond)\n value = tuple([value])\n self.c.execute(query,value)\n\n elif key == 'name' :\n values = tuple([value,cond])\n query = 'UPDATE ATOM SET {cn}=? WHERE name=?'.format(cn=colname)\n self.c.execute(query,values)\n\n elif key == 'index' :\n values = tuple([value] + [v+1 for v in cond])\n qm = ','.join(['?' for i in range(len(cond))])\n query = 'UPDATE ATOM SET {cn}=? WHERE rowID in ({qm})'.format(cn=colname,qm=qm)\n self.c.execute(query,values)\n\n elif key == 'query' :\n query = 'UPDATE ATOM SET {cn}=? {c1}'.format(cn=colname,c1=cond)\n value = tuple([value])\n self.c.execute(query,value)\n\n else:\n print('Error arguments %s not supported in pdb2sql.get()\\nOptions are:\\n' %(key))\n for posskey,possvalue in arguments.items():\n print('\\t' + posskey + '\\t\\t' + possvalue)\n return",
"def test_mixed_value_and_null_update(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6, text=None)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, 6 if i == 3 else i)\n self.assertEqual(row.text, None if i == 3 else str(i))",
"def _format_sql(self, trade, table):\n\n trade = copy(trade)\n for key, value in trade.items():\n\n if value is None:\n trade[key] = 'NULL'\n elif key == 'date':\n value = tb.DateConvert(value).date\n\n if isinstance(value, str):\n trade[key] = f\"'{value}'\"\n\n return {k:v for k,v in trade.items() if k in self.fields[table]}",
"def update_row(table_str, attribute_value_dict, keys_dict): #works\n sql = make_update_row(table_str, attribute_value_dict, keys_dict)\n #print sql\n execute_edit_queries(sql)",
"def test_null_update_deletes_column(self):\r\n partition = uuid4()\r\n for i in range(5):\r\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\r\n\r\n # sanity check\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == str(i)\r\n\r\n # perform update\r\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None)\r\n\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == (None if i == 3 else str(i))",
"def make_update_row(table_str, attribute_value_dict, keys_dict): #works\n\n # initialize query statement and input\n sql = \"\"\"UPDATE `%s` SET %s = '%s'\"\"\"\n # get attributes from the dictionaries and initialize list for values\n attributes_list = attribute_value_dict.keys()\n value_list = []\n list_of_key_attributes = keys_dict.keys()\n list_of_key_values = []\n # create list of values from the dictionary\n for key in attributes_list:\n value_list += [attribute_value_dict[key]]\n for key in list_of_key_attributes:\n list_of_key_values += [keys_dict[key]]\n input_format = (table_str, attributes_list[0], value_list[0])\n # add set's to the query statemtens and input\n for i in range(1, len(attributes_list)):\n sql += \", %s = '%s'\"\n input_format += (attributes_list[i], value_list[i])\n # add the Where-clause\n sql += \" WHERE %s = '%s'\"\n input_format += (list_of_key_attributes[0], list_of_key_values[0])\n # add an AND to the Where-clause for multi-keys\n for i in range(1, len(list_of_key_attributes)):\n sql += \" AND %s = '%s'\"\n input_format += (list_of_key_attributes[i], list_of_key_values[i])\n # combine query & input, run\n sql = sql % input_format\n return sql",
"def update_settings_info(update_dict,id,con,cur):\n psql=\"update settings set \"\n psql_update=\"\"\n for (key,value) in update_dict.items():\n psql_update=f\"{key}='{value}',\"+psql_update\n\n condition=f\" where id='{id}';\"\n psql=psql+psql_update[:-1]+condition\n cur.execute(psql)\n con.commit()",
"def replace_params(self):\n raw_sql = self.raw_sql\n for placeholder in self.to_replace:\n newreg = re.compile(placeholder)\n repl = self.get_replacement_value(placeholder)\n if repl:\n raw_sql = newreg.sub(str(repl), raw_sql)\n self.sql = raw_sql",
"def update_one_address(update_dict,id,id_address,con,cur):\n psql=\"update address set \"\n psql_update=\"\"\n for (key,value) in update_dict.items():\n psql_update=f\"{key}='{value}',\"+psql_update\n\n condition=f\" where extern_id='{id_address}' and extern_client_id='{id}';\"\n psql=psql+psql_update[:-1]+condition\n cur.execute(psql)\n con.commit()"
] | [
"0.7133753",
"0.7039731",
"0.69529456",
"0.6863801",
"0.67174447",
"0.6690802",
"0.6452646",
"0.64026815",
"0.63402575",
"0.62404656",
"0.620921",
"0.61646706",
"0.60941637",
"0.6006477",
"0.5963547",
"0.58923066",
"0.58101135",
"0.57359624",
"0.57193387",
"0.5708814",
"0.5696036",
"0.5615125",
"0.55723757",
"0.55677956",
"0.55647683",
"0.55620813",
"0.55454016",
"0.5511596",
"0.54935765",
"0.5490712"
] | 0.7526104 | 0 |
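A quick way to see what the sql_filtered_update row above produces is to feed it a value list containing None and run the result against an in-memory sqlite3 database. The helper body below is copied from the row; the table, columns, and sample data are invented for illustration. Note that the returned parameter list covers only the SET values, so the caller still has to append the WHERE parameters before executing.

```python
import sqlite3

# Copied from the row above so the sketch is self-contained.
def sql_filtered_update(table, set_columns, where_columns, values):
    for index in range(len(set_columns) - 1, -1, -1):
        if values[index] is None:
            del set_columns[index]
            del values[index]
    set_columns = [col + ' = ?' for col in set_columns]
    columns_to_set = ', '.join(set_columns)
    where_columns = [col + ' = ?' for col in where_columns]
    where_condition = ' AND '.join(where_columns)
    query = f'UPDATE {table} SET {columns_to_set} WHERE {where_condition}'
    return query, values

query, params = sql_filtered_update(
    'users', ['name', 'email', 'age'], ['id'], ['Ada', None, 36]
)
print(query)   # UPDATE users SET name = ?, age = ? WHERE id = ?
print(params)  # ['Ada', 36]

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT, age INTEGER)')
conn.execute("INSERT INTO users VALUES (1, 'placeholder', 'a@example.com', 30)")
conn.execute(query, params + [1])  # WHERE parameter appended by the caller
conn.commit()
print(conn.execute('SELECT name, email, age FROM users').fetchall())
# [('Ada', 'a@example.com', 36)] -- email untouched because its value was None
```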
Dynamically generates an SQL insert query, eliminating the columns whose value is None | def sql_filtered_insert(table, set_columns, values):
for index in range(len(set_columns) - 1, -1, -1):
if values[index] is None:
del set_columns[index]
del values[index]
values_fields = ['?'] * len(set_columns)
query_columns = ', '.join(set_columns)
values_fields = ', '.join(values_fields)
query = f'INSERT INTO {table} ({query_columns}) VALUES ({values_fields})'
return query, values | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_insert_query(self, query, columns, table_name):\n cols = \"\"\n values = \"\"\n on_dupe_values = \"\"\n\n for column in columns:\n cols += \"`{}`, \".format(column)\n values += \"%({})s, \".format(column)\n on_dupe_values += \"{} = VALUES({}), \".format(column, column)\n\n # Remove trailing whitespace and commas\n cols = cols.rstrip().rstrip(\",\")\n values = values.rstrip().rstrip(\",\")\n on_dupe_values = on_dupe_values.rstrip().rstrip(\",\")\n\n query = query.format(table_name=table_name, cols=cols, values=values, on_dupe_values=on_dupe_values)\n return query",
"def getSQL_insert(table, **kwargs):\n ks = ''\n vs = ''\n for k, v in kwargs.items():\n ks += k + ','\n if isNumber(v) or v == 'null':\n vs += str(v) + ','\n else:\n vs += \"'\" + v + \"',\"\n\n return 'INSERT INTO %s (%s) VALUES (%s)' % (table, ks[:-1], vs[:-1])",
"def getSQL_insert_ex(table, dict):\n ks = ''\n vs = ''\n for k, v in dict.items():\n ks += k + ','\n if v == None:\n v = 'null'\n if isNumber(v) or v == 'null':\n vs += str(v) + ','\n elif str(type(v)) == \"<type 'datetime.datetime'>\":\n vs += \"'\" + v.strftime('%Y-%m-%d %H:%M:%S') + \"',\"\n elif str(type(v)) == \"<type 'datetime.time'>\":\n vs += \"'\" + v.strftime('%H:%M:%S') + \"',\"\n else:\n vs += \"'\" + v + \"',\"\n\n return 'INSERT INTO %s (%s) VALUES (%s)' % (table, ks[:-1], vs[:-1])",
"def generate_insert_sql(self, fields, values):\n return \"INSERT INTO %s (%s) VALUES(%s)\" % (self.tablename, fields,\n ','.join([sql_quote(x) for x in values]))",
"def _insert_sql(self, table, insert_values):\n if isinstance(insert_values, dict):\n keys = []\n values = []\n for k, v in insert_values.items():\n keys.append(k)\n if v is None:\n values.append('')\n elif isinstance(v, str):\n v = \"'\" + str(v).replace(\"'\", \"''\") + \"'\"\n values.append(v)\n elif isinstance(v, datetime.datetime):\n v = \"'\" + str(v) + \"'\"\n values.append(v)\n else:\n values.append(str(v))\n keys = \",\".join(keys)\n values = \",\".join(values)\n sql = \"INSERT INTO %s (%s) VALUES (%s)\" % (table, keys, values)\n return sql\n elif isinstance(insert_values, (tuple, list)):\n values = []\n for v in insert_values:\n if v is None:\n values.append('')\n elif isinstance(v, str):\n v = \"'\" + str(v).replace(\"'\", \"''\") + \"'\"\n values.append(v)\n elif isinstance(v, datetime.datetime):\n v = \"'\" + str(v) + \"'\"\n values.append(v)\n else:\n values.append(str(v))\n values = \",\".join(values)\n sql = \"INSERT INTO %s VALUES (%s)\" % (table, values)\n return sql\n else:\n raise TypeError( # pragma: no cover\n \"unexpected type: \" + str(type(insert_values)))",
"def get_scrub_sql():\r\n # it seems incredibly hard to get SQLAlchemy to emit a fully-compiled SQL\r\n # string that including data values. i gave up after trying this method with\r\n # the \"dialect\" sqlalchemy.dialects.mysql.mysqldb.MySQLDialect()\r\n # https://sqlalchemy.readthedocs.org/en/latest/faq/sqlexpressions.html\r\n # #how-do-i-render-sql-expressions-as-strings-possibly-with-bound\r\n # -parameters-inlined\r\n sql_format = (\"update %(table)s set %(col)s = %(sub_value)s \"\r\n \"where %(col)s is not null;\")\r\n return '\\n'.join(\r\n sql_format % dict(table=c.table.name, col=c.name, sub_value=v)\r\n for c, v in get_scrub_columns().iteritems())",
"def makeInserts(self, tbl, columns, rows):\n\n lines = []\n lines.append(\"INSERT ALL\")\n\n stmt = ' INTO \"{}\" ({})'.format(\n tbl,\n \", \".join(['\"'+name+'\"' for name, *_ in columns])\n )\n\n fmt = ' VALUES ({})'.format(\n \", \".join([\"{}\" if t in ('NUMBER') else \"'{}'\" \n for n, t, *_ in columns])\n )\n\n print(stmt)\n print(fmt)\n\n for row in rows:\n lines.append(stmt)\n lines.append(fmt.format(*row))\n \n\n lines.append(\"SELECT * FROM DUAL\")\n\n return \"\\n\".join(lines)",
"def makeInserts(self, tbl, columns, rows):\n\n lines = []\n lines.append(\"INSERT ALL\")\n\n stmt = ' INTO \"{}\" ({})'.format(\n tbl,\n \", \".join(['\"'+name+'\"' for name, *_ in columns])\n )\n\n fmt = ' VALUES ({})'.format(\n \", \".join([\"{}\" if t in ('NUMBER') else \"'{}'\" \n for n, t, *_ in columns])\n )\n\n print(stmt)\n print(fmt)\n\n for row in rows:\n lines.append(stmt)\n lines.append(fmt.format(*row))\n \n\n lines.append(\"SELECT * FROM DUAL\")\n\n return \"\\n\".join(lines)",
"def create_insert_code(name_of_table, col_names=[None], col_values=[None],):\n sql_str = ''\n try:\n if col_names is None:\n raise Exception('You must provide at least one column name!')\n else:\n sql_str = 'INSERT INTO ' + str(name_of_table).strip() + '\\n('\n for col in col_names:\n sql_str += str(col) + ', '\n sql_str = sql_str[0:-2] + ')' # Strip off the last comma\n if col_values is None:\n raise Exception('You must provide at least one column value!')\n else:\n sql_str += '\\nVALUES\\n('\n for col in col_values:\n sql_str += str(col) + ', '\n sql_str = sql_str[0:-2] + ');' # Strip off the last comma\n except Exception as e:\n raise Exception('Error in create_insert_code(): ' + e.__str__())\n return sql_str",
"def get_sql_template(table):\n if table == 'account_position':\n sql = \"insert into account_position values \" \\\n \"('%s', '%s', '%s', '0', '%f', '%f', '%f', '0', '0', '%f', '%f', '%f', '0', '0'\" \\\n \",'0', '0', '0', null, null, null, null, null, '0', '0', \" \\\n \"'0', '0', '%f', '0', '0', '%s');\"\n\n elif table == 'pf_position':\n sql = \"insert into pf_position values \" \\\n \"('%s', '%s', '%s', '0', '%f', '%f', '%f', '0', '0', '%f', \" \\\n \"'%f', '%f', '0', '0', '0', '0', null, '1',\" \\\n \" '0', '0', '0', '0', '0', '0', '0', '0', '%f', '0', '0', '0', null);\"\n\n elif table == 'account_trade_restrictions':\n sql = \"insert into account_trade_restrictions values \" \\\n \"('%s', '%s', '18', '0', '1000', '0', '1000', '0', '2000', \" \\\n \"'0', '3000', '0', '1000', '0', '1000', '0','1000', '1000', '0.9',\" \\\n \" '1000', '0.2', '0.1','100000000', '0', '0', '0', \" \\\n \"'0', '0','0' )\"\n elif table == 'instrument':\n sql = \"select ticker, pre_price from instrument where ticker= '%s'\"\n\n elif table == 'pf_account':\n sql = \"insert into pf_account values\" \\\n \"('%s', '%s', '%s', '%s', null, '');\"\n else:\n platform_logger.error(\"input wrong table '%s'\" % table)\n return ''\n return sql",
"def build_insert_expression(data_dict, table_to_insert):\n\n command_str_left = \"INSERT INTO \" + table_to_insert + \" (\"\n command_str_right = \" VALUES (\"\n\n for field_name in data_dict:\n field_value = data_dict[field_name][0]\n flg_string_type = data_dict[field_name][1]\n if command_str_left[-1] != \"(\":\n command_str_left += \",\"\n command_str_right += \",\"\n if flg_string_type: command_str_right += \"'\"\n command_str_left += field_name\n command_str_right += str(field_value)\n if flg_string_type: command_str_right += \"'\"\n command_str = command_str_left + \")\" + command_str_right + \")\"\n\n return command_str",
"def get_sql_statement_insert_data_for_row(datatable_row: ModelDatatableRow):\n if datatable_row is None:\n return None\n\n # sql = ''' INSERT INTO projects(name,begin_date,end_date)\n # VALUES(?,?,?) '''\n # begin of statement\n sql_statement = ''' INSERT INTO ''' + \\\n datatable_row.get_datatable_name() + '''('''\n\n # get column - value pairs\n row_entries = datatable_row.get_row_entries()\n column_names_string = ''\n values_string = ''\n index = 0\n while index < len(row_entries):\n row_entry: ModelDatatableRowEntry = row_entries[index]\n # check if column name is existing\n if row_entry.get_column_name() is None:\n logger_log(\"failed. column name is NONE. datatable: \" + datatable_row.get_datatable_name())\n return None\n # check if row entry value is existing\n if row_entry.get_value() is None:\n row_entry.value = \"\" # set empty string if value is not existing\n\n column_names_string = column_names_string + row_entry.get_column_name()\n values_string = values_string + \"\\'\" + row_entry.get_value() + \"\\'\"\n if index < len(row_entries) - 1:\n column_names_string = column_names_string + ''', ''' # add comma if index is not the last element\n values_string = values_string + ''', ''' # add comma if index is not the last element\n index += 1\n\n # add column names\n sql_statement = sql_statement + column_names_string\n\n # add end of first part: \"INSERT INTO projects(name,begin_date,end_date)\"\n sql_statement = sql_statement \\\n + ''')'''\n\n # add values and end of statement\n sql_statement = sql_statement \\\n + ''' VALUES(''' \\\n + values_string \\\n + ');'\n\n logger_log(\"datatable name: \"\n + datatable_row.get_datatable_name()\n + \" - result: \"\n + sql_statement)\n return sql_statement",
"def get_table_insert_command_from_dict(table, dict):\n sql = 'INSERT OR REPLACE INTO ' + table\n sql += ' ('\n sql += ','.join(dict.keys())\n sql += ') VALUES ('\n sql += ','.join(map(wrap_with_strings, dict.values()))\n sql += ');'\n return sql",
"def form_insert_query(self, table_name, input_data, table_fields_names=None, table_fields_types=None):\n\n\t\t# creating first part of the query -> section with columns' names\n\t\tquery_table_structure = self.build_query_part(table_fields_names, table_fields_types, query_part=1)\n\n\t\t# creating second part of the query -> section with values\n\t\tquery_values = self.build_query_part(input_data, table_fields_types, query_part=2)\n\t\t\n\t\t# form query\n \t\tquery = 'INSERT INTO ' + table_name + ' ' + query_table_structure + ' VALUES ' + query_values\n\n\t\treturn query",
"def default_missing_values(cur, conn):\n for query in default_values_queries:\n cur.execute(query)\n conn.commit()",
"def _raw_sql(self, values):\n if isinstance(self.model._meta.pk, CharField):\n when_clauses = ' '.join([self._when(\"'{}'\".format(x), y) for (x, y) in values])\n else:\n when_clauses = ' '.join([self._when(x, y) for (x, y) in values])\n table_name = self.model._meta.db_table\n primary_key = self.model._meta.pk.column\n return 'SELECT CASE {}.\"{}\" {} ELSE 0 END'.format(table_name, primary_key, when_clauses)",
"def _insert_internal(self, cols, vals) :\n\n self.row_id += 1\n vals[0] = self.row_id\n\n if None in vals :\n cvs = list(zip(cols, vals))\n cvs = [cv for cv in cvs if cv[1] is not None]\n cs = [cv[0] for cv in cvs]\n vs = [cv[1] for cv in cvs]\n else :\n cs = cols\n vs = vals\n\n value_sql = ','.join([self._quoter(cols[i]) % str(vs[i]) \n for i in range(len(vs))])\n\n col_sql = ','.join(['\"%s\"' % c for c in cs])\n insert_sql = 'INSERT INTO \"%s\" (%s) VALUES (%s)' % (self.name, col_sql, value_sql)\n cur = self.con.cursor()\n cur.execute(insert_sql)",
"def _assemble(self):\n assert self._kw, \"Call values() first\"\n names = ','.join(list(self._kw))\n holders = ','.join(f'%({name})s' for name in self._kw)\n return (\n f'insert into {self._table} '\n f'({names}) values ({holders}) '\n f'returning {self._returning}'\n ), self._kw.copy()",
"def workbookToSqlStatements(workbook,table_name,sheet_name_0,sheet_name_splitter,sheet_name_1,cols_name,ints,length_varchar,min_row):\n create_sql_statement = \"\"\n create_sql_statement = f\"CREATE TABLE IF NOT EXISTS {table_name} \"\n create_sql_statement += f\"(id SERIAL PRIMARY KEY , {sheet_name_0} INT, {sheet_name_1} INT,\"\n cols = str(cols_name).strip(\"]\")\n cols = cols.strip(\"[\")\n for i in range(len(cols_name)):\n cols = cols.replace(\"'\",\"\")\n cols = cols.replace(\"'\",\"\")\n if i in ints:\n create_sql_statement += f\"{cols_name[i]} INT\"\n else:\n create_sql_statement += f\"{cols_name[i]} CHAR({length_varchar})\"\n\n if i != len(cols_name) - 1:\n create_sql_statement += \",\"\n create_sql_statement += \");\"\n\n\n insert_sql_statement = \"\"\n insert_sql_statement = f\"insert into {table_name} ({sheet_name_0},{sheet_name_1},{cols}) values \"\n weeks_inserted = 0\n\n for sheet in workbook.worksheets:\n sheet_name_list = str(sheet.title).split(sheet_name_splitter)\n year = sheet_name_list[0]\n week = sheet_name_list[1]\n weeks_inserted += 1\n for row in sheet.iter_rows(min_row=min_row,values_only=True):\n values = \"(\"\n values += f\"{year},{week},\"\n insert = True\n for i in range(len(cols_name)):\n try:\n if i in ints:\n\n cell = str(row[i])\n try:\n values += f\"{int(cell)}\"\n except:\n values += \"null\"\n else:\n if str(row[i]) != \"\" and str(row[i]) != \"None\":\n values += f\"'{str(row[i]).rstrip()}'\"\n else:\n insert = False\n\n if i != len(cols_name) - 1:\n values += \",\"\n else:\n break\n except:\n return print(\"error the length of the list cols_name exeed the length of max columns of the sheet\")\n\n values += \")\"\n\n if insert:\n insert_sql_statement += values + \",\\n\"\n temp = len(insert_sql_statement)\n insert_sql_statement = insert_sql_statement[:temp - 2]\n insert_sql_statement += \";\"\n\n #insert_sql_statement = insert_sql_statement.replace(\",\\n;\",\"\\n;\")\n return str(weeks_inserted), create_sql_statement, insert_sql_statement",
"def make_insert_row(table_str, attribute_value_dict): #works\n#Aanpassen zodat query niet uitgevoerd wordt als pk al bestaat\n #initialize input for string formatting\n attributes_string = \"(\"\n values_list = []\n #retrieve attributes and values from dictionary and add them to the string\n for key in attribute_value_dict:\n values_list += [attribute_value_dict[key]]\n attributes_string += \"%s, \" % key\n attributes_string = attributes_string[:(len(attributes_string)-2)]\n attributes_string += ')'\n values = str(tuple(values_list))\n sql = \"\"\"INSERT INTO `%s` %s VALUES %s \"\"\" % (table_str, attributes_string, values)\n return sql",
"def test_inserts_w_all_nulls(\n self, connection, metadata, sort_by_parameter_order, datatype\n ):\n t = Table(\n \"t\",\n metadata,\n Column(\"id\", Integer, Identity(), primary_key=True),\n Column(\"data\", datatype),\n )\n metadata.create_all(connection)\n result = connection.execute(\n insert(t).returning(\n t.c.id,\n sort_by_parameter_order=bool(sort_by_parameter_order),\n ),\n [{\"data\": None}, {\"data\": None}, {\"data\": None}],\n )\n eq_(set(result), {(1,), (2,), (3,)})",
"def generate_sql(field_map, input_data, macro_map={}):\n if input_data == []:\n return SQL_HEADER\n\n entity = Entity(input_data)\n\n where_clause = ''\n if entity.is_simple_clause():\n where_clause = transpile_simple_clause(field_map, entity, macro_map)\n elif entity.is_compound_clause():\n where_clause = transpile_compound_clause(field_map, entity, macro_map)\n elif entity.is_macro():\n where_clause = transpile_macro(field_map, entity, macro_map)\n else:\n raise TranspilerError(\"Input (%s) not recognized.\" % (input_data))\n\n return SQL_HEADER + ' WHERE '+ where_clause",
"def _get_table_sql_columns(columns=[]):\n\n\tif len(columns) == 0:\n\t\tsql_columns = '*'\n\n\telse: \n\t\tsql_columns = \",\".join(columns)\n\n\treturn sql_columns",
"def insert(self, table, insert_values, cursor=None, nolog=True):\n if isinstance(insert_values, list):\n # we expect several insertion\n if self._engine != \"SQLite\":\n for d in insert_values:\n self.insert(table, d, cursor)\n else:\n if isinstance(insert_values[0], dict):\n ins = {}\n for k in insert_values[0]:\n ins[k] = \":\" + k\n sql = self._insert_sql(table, ins)\n else:\n q = tuple('?' for _ in insert_values[0])\n sql = self._insert_sql(table, q).replace(\"'\", \"\")\n\n sql = sql.replace(\"'\", \"\")\n try:\n if not nolog: # pragma: no cover\n if len(sql) > 1000:\n self.LOG(\"SQLs\", sql[:1000])\n else:\n self.LOG(\"SQLs\", sql)\n self._connection.executemany(sql, insert_values)\n return \"\"\n except Exception as e:\n raise ExceptionSQL( # pylint: disable=W0707\n \"Unable to execute a SQL request (3) (cursor %r) (file %r)\" %\n (str(cursor), self.get_file()), e, sql)\n\n elif isinstance(insert_values, dict):\n sql = self._insert_sql(table, insert_values)\n\n try:\n if not nolog: # pragma: no cover\n if len(sql) > 1000:\n self.LOG(\"SQLs\", sql[:1000])\n else:\n self.LOG(\"SQLs\", sql)\n if cursor is not None:\n cursor.execute(sql)\n else:\n if self._buffer_insert_s > 0:\n self._buffer_insert.append(sql)\n\n if len(self._buffer_insert) >= self._buffer_insert_s:\n for s in self._buffer_insert:\n self._connection.execute(s)\n del self._buffer_insert[:]\n else:\n self._connection.execute(sql)\n\n return sql\n except Exception as e:\n raise ExceptionSQL( # pylint: disable=W0707\n \"unable to execute a SQL request (2) (cursor %r) (file %r)\" %\n (str(cursor), self.get_file()), e, sql)\n\n else:\n raise DBException( # pragma: no cover\n \"insert: expected type (list of dict or dict) instead of %s\" %\n (str(\n type(insert_values))))",
"def info_insert(tbl_mgr, table_str, row_arr):\n val_str = \"(\"\n\n for col in range(len(row_arr)):\n if col > 0:\n val_str += \", \"\n if row_arr[col] is None:\n val_str += \"NULL\"\n else:\n val_str += \"'\" + row_arr[col] + \"'\"\n val_str += \")\"\n\n return tbl_mgr.insert_stmt(table_str, val_str)",
"def to_insert(table, d):\n\n columns = []\n args = []\n for key, val in d.items():\n columns.append('\"{}\"'.format(key))\n args.append(val)\n stmt = 'insert into {table} ({columns}) values ({params})'.format(\n table=table,\n columns=', '.join(columns),\n params=', '.join(f'${i + 1}' for i in range(len(columns)))\n )\n return (stmt, args)",
"def insert_records(cursor,table_schema,table_name,column_names,records, checkCols = True):\n # Check if each column exists and add missing ones\n if checkCols:\n for cn in column_names:\n add_column_to_staging_table(cursor, table_schema, table_name, cn)\n\n insert_base = \"INSERT INTO \" + table_schema + \".\" + table_name + \" (\" + \",\".join(column_names) + \") VALUES \"\n insert_values = []\n for record in records:\n insert_value = \"('\" + \"','\".join(str(x).replace(\"'\",\"''\") for x in record) + \"')\" \n insert_values.append(insert_value)\n insert_record = insert_base + \",\".join(insert_values) + \";\"\n if records != []:\n cursor.execute(insert_record.replace(\",TO,\",\",TOV,\"))\n print(\"Inserted \" + str(len(records)) + \" records into \" + table_schema + \".\" + table_name)",
"def sql_column_builder(data=None):\n tmp = []\n for key, value in data.iteritems():\n if isinstance(value, basestring):\n tmp.append(\"{key}='{value}'\".format(key=key, value=value))\n else:\n tmp.append(\"{key}={value}\".format(key=key, value=value))\n\n column_string = ', '.join(tmp)\n return column_string",
"def _delete_null_columns(self):\r\n ds = DeleteStatement(self.column_family_name)\r\n deleted_fields = False\r\n for _, v in self.instance._values.items():\r\n col = v.column\r\n if v.deleted:\r\n ds.add_field(col.db_field_name)\r\n deleted_fields = True\r\n elif isinstance(col, Map):\r\n uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value)\r\n if uc.get_context_size() > 0:\r\n ds.add_field(uc)\r\n deleted_fields = True\r\n\r\n if deleted_fields:\r\n for name, col in self.model._primary_keys.items():\r\n ds.add_where_clause(WhereClause(\r\n col.db_field_name,\r\n EqualsOperator(),\r\n col.to_database(getattr(self.instance, name))\r\n ))\r\n self._execute(ds)",
"def insert_values():\n pass"
] | [
"0.71312165",
"0.70737326",
"0.6936334",
"0.6610949",
"0.6493307",
"0.6297049",
"0.62800425",
"0.62800425",
"0.6227936",
"0.6121583",
"0.60976523",
"0.60733664",
"0.60127187",
"0.59995824",
"0.59413385",
"0.5925546",
"0.5920737",
"0.5906229",
"0.586013",
"0.58541316",
"0.5853796",
"0.5849819",
"0.5831716",
"0.5826773",
"0.5824611",
"0.577806",
"0.57700497",
"0.57422245",
"0.5738727",
"0.5728218"
] | 0.7548947 | 0 |
Check if SQLite3 module is threadsafe | def is_sqlite3_threadsafe():
try:
import sqlite3 as sql
conn = sql.connect(':memory:')
threadsafety = conn.execute('SELECT * FROM pragma_compile_options WHERE compile_options LIKE \'THREADSAFE=%\'').fetchone()[0]
conn.close()
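        # THREADSAFE=1 is SQLite's serialized mode, which is safe to use from multiple threads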
if int(threadsafety.split("=")[1]) == 1:
return True
except Exception as exc: # pylint: disable=broad-except
LOG.error('Failed to check sqlite thread safe: {}', exc)
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parallel_safe(self):\n return True",
"def parallel_safe(self):\n\n return True",
"def check_db():\n try:\n conn = sqlite3.connect(DB_PATH)\n cursor = conn.cursor()\n cursor.execute(\"SELECT id FROM query LIMIT 1;\")\n conn.close()\n except:\n init_db()",
"def __virtual__():\n if not HAS_SQLITE3:\n return False\n return True",
"def is_running_py3():\n return sys.version_info >= (3, 0)",
"def IsSynchronized(self) -> bool:",
"def test_concurrent_access(self):\n num_threads = 4\n thread_pool = ThreadPool(num_threads)\n\n def test_func(x):\n \"\"\"Create, get, delete models.\"\"\"\n for i in range(32):\n handle = self.model_manager.create(name='%s-%s' % (x, i))\n self.assertTrue(\n handle in [m.handle for m in self.model_manager.models()])\n self.model_manager.delete(handle)\n self.assertTrue(\n handle not in\n [m.handle for m in self.model_manager.models()])\n return True\n for x in range(num_threads):\n thread_pool.add_func(test_func, x)\n thread_pool.join()\n self.assertTrue(len(self.model_manager.models()) == 0,\n 'Expecting no models to stick around')",
"def needs_commons_db(self):\n return False",
"def _db_is_locked(dbname):\n\n # first, check to see if lock file exists, if so, assume the file is locked\n lock_name = f\"{dbname}.lock\"\n if os.path.exists(lock_name):\n logging.debug(f\"{dbname} is locked\")\n return True\n\n # no lock file so try to read from the database to see if it's locked\n locked = None\n try:\n (conn, c) = _open_sql_file(dbname)\n c.execute(\"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;\")\n conn.close()\n logging.debug(f\"{dbname} is not locked\")\n locked = False\n except Exception as e:\n logging.debug(f\"{dbname} is locked\")\n locked = True\n\n return locked",
"def is_py3():\n return sys.version_info >= (3, 0)",
"def is_py3():\n return sys.version_info[0] == 3",
"def is_safe_to_upgrade():\n return not contains_locked_file(get_rlbot_directory())",
"def db_exists():\n global DB_TYPE\n global SQLITE_FILE\n\n if DB_TYPE == \"sqlite\":\n return Path(SQLITE_FILE).exists()",
"def _check_db(self) -> None:\n\n logger.debug(f\"\\n{clrs.WARNING}Checking DB{clrs.ENDC}\\n\")\n\n if self.table_name not in SUPPORTED_SECTIONS:\n raise SelectedTableError(\"Section Not Supported, Check Settings\")\n\n elif not self.db_name.exists():\n print(f\"Creating DB & Table {self.db_name.name}\")\n self._init_db()\n self._create_table()\n return None\n\n elif self.db_name.exists():\n result: List[Any] = self.query(\n \"SELECT * FROM sqlite_master WHERE name = ?\", [self.table_name]\n )\n\n if not result:\n print(f\"Creating table {self.table_name}\")\n self._create_table()",
"def is_py3() -> bool:\n return sys.version_info[0] == 3",
"def is_concurrent(self):\n return self.concurrent",
"def __initialization__(self):\n \n if self._mode != 'r':\n self._check_within_context_()\n \n if os.path.exists('%s_lock'%self._path) and self._mode == 'w':\n self.__special_exit = 'lock exists'\n raise Exception('It seems that database %s is currently being modified i.e. %s_lock file exists'%(self._path, self._path))\n \n \n #initialize self.columns\n self.initialize_columns()\n self.column_id = {elem: ii for ii,elem in enumerate(self._columns)}\n self.column_is_date = dict()\n for el in self._columns.keys():\n if 'date_' in el:\n self.column_is_date[el] = True\n else:\n self.column_is_date[el] = False\n\n \n #create db if it does not exist, otherwise simply open it\n if not os.path.exists(self._path):\n #this function creates the table => requires write priviledges\n self._check_writable_()\n if self._verbose > 0:\n print('TableManager database does not exist, creating it:\\n => %s'%self._path)\n self._conn = sqlite3.connect(self._path)\n self._cursor = self._conn.cursor()\n self._cursor.execute('CREATE TABLE FILEINFO (%s)'%(', '.join(['%s %s'%(el, self._columns[el]) for el in self._columns.keys()])))\n self._conn.commit()\n #lock database for writes (we already know mode == 'w')\n shutil.copy(self._path, '%s_lock'%self._path)\n else:\n self._conn = sqlite3.connect(self._path)\n self._cursor = self._conn.cursor()\n if self._mode == 'w':\n #create lock if writing is intended\n shutil.copy(self._path, '%s_lock'%self._path)\n #check columns in db vs columns in tablemanager\n self._cursor.execute('SELECT * FROM FILEINFO')\n columns_db = [el[0] for el in self._cursor.description]\n if set(columns_db) != set(self._columns.keys()):\n raise Exception('columns in db not consistent with columns in tablemanager')",
"def test_backend_impls(tmp_sqlite3_db_url: str):\n cnx_pool = create_connection_pool(f\"{tmp_sqlite3_db_url}\")\n assert \"?\" == cnx_pool.mung_symbol\n cnx = cnx_pool.lease()\n cnx.execute(test_sql.CREATE_TABLE, commit=True)\n for x in range(10):\n values = (str(uuid.uuid4()), str(uuid.uuid4()), x, x * 2)\n assert 1 == cnx.execute(test_sql.SIMPLE_INSERT, values)\n cnx.commit()\n with cnx.query(test_sql.SIMPLE_SELECT, (6,)) as res:\n res: ResultSet = res\n row = res.fetchone()\n assert [\"my_pk_col\", \"some_uuid\", \"col_bigint\", \"col_integer\"] == [r.name for r in res.description]\n assert (7, 14) == (row[2], row[3])\n assert 2 == len(res.fetchall())\n cnx_pool.release(cnx)\n cnx_pool.dispose()",
"def _get_connection(self) -> sqlite3.Connection:\n curr_thread = threading.get_ident()\n if curr_thread not in self.conn or self.conn[curr_thread] is None:\n conn = sqlite3.connect(self.db_path, check_same_thread=False)\n conn.row_factory = sqlite3.Row\n self.conn[curr_thread] = conn\n return self.conn[curr_thread]",
"def query_vault_db():\n try:\n get_session(True).execute('SELECT * FROM sqlite_master')\n return True\n except exc.DatabaseError as e:\n # print('Database Error: %s' % e)\n # print('Most likely, this error is due to an invalid database key or a mis-configured sqlcipher3/libsqlcipher')\n return False",
"def check_dependencies():\n check_python_version()\n\n dependencies = [\"sqlite3\"]\n\n for dependency in dependencies:\n try:\n __import__(dependency)\n except ImportError as e:\n raise CuckooStartupError(\"Unable to import \\\"%s\\\"\" % dependency)\n\n return True",
"def isSQLite3(filename):\n from os.path import isfile, getsize\n\n if not isfile(filename):\n return False\n if getsize(filename) < 100: # SQLite database file header is 100 bytes\n return False\n else:\n fd = open(filename, 'rb')\n Header = fd.read(100)\n fd.close()\n\n if Header[0:16] == 'SQLite format 3\\000':\n return True\n else:\n return False",
"def db_exists(self):\n \n with self.connection:\n c = self.connection.cursor()\n c.execute(\"SELECT EXISTS(SELECT 1 FROM sqlite_master WHERE name=?)\", [PUBLICATIONS_TABLE])\n exists = c.fetchone()\n if(exists[0] == 1):\n return True\n else:\n return False",
"def setup_database(function) -> Callable:\n def wrapper(*args, **kwargs):\n db_path = current_app.config['DB_LOCATION']\n db_lock = current_app.config['DB_LOCK']\n db_lock.acquire()\n print('lock start')\n db = rocksdb.DB(db_path, rocksdb.Options(create_if_missing=True, max_open_files=10000),\n read_only=True)\n value = function(*args, **kwargs, db=db)\n del db\n db_lock.release()\n print('lock end')\n return value\n return wrapper",
"def needs_patch():\n return (IndexName is not None and\n hasattr(BaseDatabaseSchemaEditor, '_create_unique_sql'))",
"def test_py3(self):\n if sys.version_info < self.MIN_SUPPORTED_VERSION:\n return\n import miflora # noqa: F401 # pylint: disable=unused-import,import-outside-toplevel",
"def attempt_dummy_encrypted_db(db_path):\n\n engine = create_engine(\n 'sqlite+pysqlcipher://:' + create_temporary_secret() + '@//' + db_path,\n module=sqlcipher3)\n # engine = create_engine('sqlite:///' + db_path)\n connection = engine.connect()\n connection.execute('CREATE TABLE foo (a int)')\n connection.execute('INSERT INTO foo (a) VALUES (123)')\n result_proxy = connection.execute('SELECT * FROM foo')\n return True if result_proxy.fetchall() == [(123,)] else False",
"def conn_testing() -> sqlite3.Connection:\n database = Path(id_test)\n conn = __check_existence(database)\n with conn:\n return conn",
"def _isolate_db_query(self, query_fn, ret_lst, args, kwargs):\n conn = sqlite3.connect(\n self.db_filename,\n isolation_level=None,\n timeout=self.transaction_timeout)\n try:\n conn.execute(\"PRAGMA locking_mode=EXCLUSIVE;\").close()\n conn.execute(\"BEGIN EXCLUSIVE;\").close()\n\n # This fn can call things like: ret = conn.execute(qs).fetchall()\n # Remember that all cursors need to be closed!\n query_fn(conn, ret_lst, *args, **kwargs)\n\n conn.commit()\n except Exception:\n try:\n conn.rollback()\n except Exception:\n pass\n raise\n finally:\n try:\n conn.close()\n except Exception:\n pass",
"def test_grammar_cache_is_thread_safe() -> None:\n n_threads = 10\n lexer_ids = []\n stop = threading.Event()\n\n def check_cache_lexer_id() -> None:\n # Parse a dummy string to make sure the grammar cache is populated\n # (this also checks that multiple threads can parse in parallel).\n grammar_parser.parse(\"foo\")\n # Keep track of the ID of the cached lexer.\n lexer_ids.append(id(grammar_parser._grammar_cache.data[0]))\n # Wait until we are done.\n while not stop.is_set():\n time.sleep(0.1)\n\n # Launch threads.\n threads = []\n for i in range(n_threads):\n threads.append(threading.Thread(target=check_cache_lexer_id))\n threads[-1].start()\n\n # Wait until all threads have reported their lexer ID.\n while len(lexer_ids) < n_threads:\n time.sleep(0.1)\n\n # Terminate threads.\n stop.set()\n for thread in threads:\n thread.join()\n\n # Check that each thread used a unique lexer.\n assert len(set(lexer_ids)) == n_threads"
] | [
"0.62684345",
"0.6167844",
"0.56663895",
"0.5534087",
"0.547586",
"0.5453264",
"0.5426761",
"0.5393733",
"0.5359209",
"0.53327715",
"0.5332757",
"0.5324714",
"0.5302915",
"0.52833873",
"0.52824384",
"0.52329636",
"0.52141863",
"0.5197171",
"0.51928973",
"0.5184741",
"0.51477844",
"0.5144008",
"0.5106358",
"0.510445",
"0.5090685",
"0.5073623",
"0.5059549",
"0.5051557",
"0.5044819",
"0.5042092"
] | 0.8585573 | 0 |
Get token from Vault. | def get_token():
global vault_token
global vault_token_time
    current_app.logger.debug('Vault: retrieving client token')
if validate_token():
vault_duration = None
try:
auth_type = current_app.config.get('VAULT_AUTH', 'TOKEN')
            current_app.logger.info('Vault auth type: ' + auth_type)
if auth_type == 'TOKEN':
vault_token = current_app.config.get('VAULT_AUTH_TOKEN')
elif auth_type == 'USERPASS':
vault_token, vault_duration = authenticate_userpass()
elif auth_type == 'LDAP':
vault_token, vault_duration = authenticate_ldap()
elif auth_type == 'CERT':
vault_token, vault_duration = authenticate_certificate()
elif auth_type == 'GCP':
vault_token, vault_duration = authenticate_gcp()
elif auth_type == 'APPROLE':
vault_token, vault_duration = authenticate_approle()
else:
current_app.logger.info('Vault: VAULT_AUTH not configured correctly.')
raise RuntimeError('Vault: VAULT_AUTH not configured correctly.')
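            # record when the token lease will expire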
if vault_duration is not None:
vault_token_time = datetime.datetime.now() + datetime.timedelta(seconds=int(vault_duration))
            current_app.logger.debug('Vault: client token retrieved')
except ConnectionError as ConnError:
current_app.logger.info('Vault: There was an error while connecting to Vault server.')
raise ConnError
return vault_token | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def token(request: Request):\n return get_token()",
"def get_token():\n params = {'get_token': 'get_token'}\n return load_page(API, params=params, headers={'content-type': 'application/json'})['token']",
"def get(uid: int, token_id: int):\n\n token = Token.get(uid, token_id).as_dto().to_primitive()\n\n if token:\n return token.to_primitive()\n else:\n raise NotFound(\"Token Not Found\")",
"def token(self):\n if self.is_auth_needed():\n self.authorize()\n\n return self.get_from_cache('token')",
"def _get_token(self):\n return user.get_token()",
"def get_keystone_token(con):\n return con.get_token(con.session)",
"def get_token(self):\n\n try:\n return jwt.decode(self.fetch_token(), KEY, algorithms=['HS256'])\n except jwt.exceptions.DecodeError:\n raise InvalidToken",
"def get_token(self):\n self.token = self._session.fetch_token(\n token_url=CLOUD_URLS[\"get_token\"][1],\n client_id=self._client_id,\n client_secret=self._client_secret\n )",
"def getToken(self):\n \n raise NotImplementedError",
"def token():\n return os.environ.get('TOKEN', None)",
"def get_token(self):\n token = self._session.token\n return token",
"def get_token():\n return session.get('microsoft_token')",
"def get_token():\n return session.get('microsoft_token')",
"def token(self) -> str:\n return pulumi.get(self, \"token\")",
"def token(self) -> str:\n return pulumi.get(self, \"token\")",
"def token(self) -> str:\n return pulumi.get(self, \"token\")",
"def get_token(token_method, acc=None, vo=None, idt=None, pwd=None):\n if not acc:\n acc = request.environ.get('HTTP_X_RUCIO_ACCOUNT')\n if not vo:\n vo = request.environ.get('HTTP_X_RUCIO_VO')\n if not idt:\n idt = request.environ.get('SSL_CLIENT_S_DN')\n if not (acc and vo and idt):\n return None\n try:\n if pwd:\n token = token_method(acc, idt, pwd, 'webui', request.environ.get('REMOTE_ADDR'), vo=vo).get('token')\n else:\n token = token_method(acc, idt, 'webui', request.environ.get('REMOTE_ADDR'), vo=vo).get('token')\n return token\n except:\n return None",
"def meraki_vault_r_secret(mount, path):\n read_secret_result = client.secrets.kv.v1.read_secret(path=meraki_vault_path, mount_point=vault_mount_point)\n api_token = read_secret_result['data']['token']\n return api_token",
"def get_token(self):\n return self.__token",
"def get_token(self):\n return self.__token",
"def get_token():\n if os.path.exists(AUTH_TOKEN_PATH):\n with open(str(AUTH_TOKEN_PATH), 'r') as TokenObj:\n try:\n data = TokenObj.read()\n except (OSError, IOError) as e:\n echo(e)\n data = json.loads(data)\n token = data[\"token\"]\n return token\n else:\n echo(\"\\nYour token file doesn't exists.\")\n echo(\"\\nIt should be present at ~/.evalai/token.json\\n\")\n return None",
"def get_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover",
"def token(self):\n return self[\"token\"]",
"def get_token(self):\n if time.time() > self.expiration:\n # need to re-authenticate and get a new token and catalog\n self._authenticate()\n \n return self.token, self.catalog",
"def get_token(self):\n\n return self._token",
"def _get_auth_token(self):\n\n __logger__.debug(\"Getting auth Token\")\n return self.keystone_client.auth_ref['token']['id']",
"def get_token():\n url = settings.GENERATE_TOKEN_URL\n headers = {\"Authorization\": \"Basic {}\".format(settings.MPESA_APP_AUTHTOKEN)}\n response = get(url, headers)\n return response.json()",
"def get_token(self):\n auth_data = {\"auth\": {\"tenantName\": 'service',\n \"passwordCredentials\":{ \"username\": 'vsm',\n \"password\": self._password}}}\n\n auth_request = urllib2.Request(self._auth_url)\n auth_request.add_header(\"content-type\", \"application/json\")\n auth_request.add_header('Accept', 'application/json')\n auth_request.add_header('User-Agent', 'python-mikeyp')\n auth_request.add_data(json.dumps(auth_data))\n auth_response = urllib2.urlopen(auth_request)\n response_data = json.loads(auth_response.read())\n\n self._token = response_data['access']['token']['id']\n\n service_list = response_data['access']['serviceCatalog']\n for s in service_list:\n if s['type'] == 'vsm' and s['name'] == 'vsm':\n self._vsm_url = s['endpoints'][0]['publicURL']\n break\n\n url_id = self._vsm_url.split('/')[-1]\n return self._token + \"-\" + url_id",
"def get_auth_token():\n if CFG.auth_enabled:\n auth_token = get_keystone_token()\n else:\n auth_token = 'notrealtoken'\n\n return auth_token",
"def getJWTtoken(self):\n\n token = False\n try:\n res = self.s.get(self.url + 'tokens/jwt', auth=(self.username, self.password), verify=False)\n res.raise_for_status()\n except:\n logger.error(res)\n raise\n token = vsdModels.Token(**res.json())\n try:\n payload = jwt.decode(token.tokenValue, verify=False)\n\n except jwt.InvalidTokenError as e:\n logger.error('token invalid, try using Basic Auth{0}'.format(e))\n raise\n\n return token"
] | [
"0.7275925",
"0.7074497",
"0.7003901",
"0.68835914",
"0.6838572",
"0.68307024",
"0.676279",
"0.67362076",
"0.6676773",
"0.666665",
"0.66557",
"0.6624486",
"0.6624486",
"0.65986884",
"0.65986884",
"0.65986884",
"0.65887886",
"0.65849286",
"0.6560688",
"0.6560688",
"0.6544694",
"0.6538761",
"0.65369374",
"0.6531362",
"0.65294576",
"0.6517663",
"0.64785665",
"0.64619076",
"0.64567167",
"0.6438627"
] | 0.80251646 | 0 |
Authenticate with username and password; the default auth path is LDAP | def authenticate_ldap():
return _userpwd_auth(current_app.config.get('VAULT_AUTH_PATH', 'ldap')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def authenticate(self, username=None, password=None, **kwargs):\n logging.debug(\"LDAP authenticatation: username=%s\" % username)\n is_active = True # User activity flag\n is_superuser = False # Superuser flag\n # Prepare template context\n context = {\n \"username\": self.q(username),\n \"user\": self.q(username)\n }\n if \"@\" in username:\n u, d = username.split(\"@\", 1)\n context[\"user\"] = self.q(u)\n context[\"domain\"] = self.q(d)\n context[\"domain_parts\"] = [self.q(p) for p in d.split(\".\")]\n try:\n # Prepare LDAP client\n client = ldap.initialize(self.server)\n if self.start_tls:\n client.start_tls_s()\n # Bind anonymously or with technical user to resolve username\n self.ldap_bind(client,\n self.bind_dn if self.bind_dn else None,\n self.bind_password if self.bind_password else None\n )\n # Search for user\n base = self.expand_template(self.users_base, context)\n filter = self.expand_template(self.users_filter, context)\n logging.debug(\"LDAP Search: filter: %s, base: %s\" % (filter, base))\n ul = client.search_s(base, ldap.SCOPE_SUBTREE, filter,\n [\"sn\", \"givenname\", \"mail\"])\n if len(ul) == 0:\n # No user found\n logging.error(\"LDAP user lookup error. User '%s' is not found\" % username)\n return None\n if len(ul) > 1:\n # Mistake in LDAP schema\n logging.error(\"LDAP schema error. More than one user returned for '%s'\" % username)\n return None\n dn, attrs = ul[0]\n logging.debug(\"LDAP search returned: %s, %s\" % (str(dn), str(attrs)))\n # Populate context with DN\n context[\"dn\"] = dn\n # Try to authenticate\n client = ldap.initialize(self.server)\n if self.start_tls:\n client.start_tls_s()\n self.ldap_bind(client, dn, password)\n # Check user is in required group\n if self.required_group:\n base = self.expand_template(self.required_group, context)\n filter = self.expand_template(self.requred_filter, context)\n logging.debug(\"LDAP checking user '%s' in group '%s'. filter: %s\" % (dn, base, filter))\n ug = client.search_s(base, ldap.SCOPE_BASE, filter, [])\n is_active = len(ug) > 0\n if not is_active:\n logging.debug(\"Disabling user '%s'\" % username)\n # Check user is superuser\n if self.superuser_group:\n base = self.expand_template(self.superuser_group, context)\n filter = self.expand_template(self.superuser_filter, context)\n logging.debug(\"LDAP checking user '%s' in group '%s'. filter: %s\" % (dn, base, filter))\n ug = client.search_s(base, ldap.SCOPE_BASE, filter, [])\n is_superuser = len(ug) > 0\n if is_superuser:\n logging.debug(\"Granting superuser access to '%s'\" % username)\n except ldap.LDAPError, why:\n logging.error(\"LDAP Error: %s\" % str(why))\n return None\n logging.debug(\"LDAP user '%s' authenticated. User is %s\" % (username,\n {True: \"active\", False: \"disabled\"}[is_active]))\n attrs = self.search_to_unicode(attrs)\n # Successfull bind\n user = self.get_or_create_db_user(username=username,\n is_active=is_active,\n is_superuser=is_superuser,\n first_name=attrs.get(\"givenName\"),\n last_name=attrs.get(\"sn\"),\n email=attrs.get(\"mail\"))\n # Authentication passed\n return user",
"def test_auth_test(self):\n backend = LdapBackend()\n backend.authenticate(None, username=\"apple\", password=\"ffffff\")",
"def authenticate(self, passwd):\n return self.easyauth(dict(self), passwd)",
"def ldap_authentication(email, password):\n conn = _ldap_server_connect()\n\n users = conn.search(search_base=current_app.config['LDAP_BASE_DN'],\n search_filter='(mail={email})'.format(email=email), attributes='dn')\n\n if users and len(conn.entries) >= 1:\n return conn.rebind(conn.entries[0].entry_dn, password)",
"def authenticate(self, conf, login, password):\n\n if not password:\n return False\n\n entry = False\n filter = filter_format(conf['ldap_filter'], (login,))\n try:\n results = self.query(conf, filter)\n\n # Get rid of (None, attrs) for searchResultReference replies\n results = [i for i in results if i[0]]\n if results and len(results) >= 1:\n dn = results[0][0]\n conn = self.connect(conf)\n conn.simple_bind_s(dn, password)\n conn.unbind()\n entry = results[0]\n except ldap.INVALID_CREDENTIALS:\n return False\n except ldap.LDAPError, e:\n _logger.error('An LDAP exception occurred: %s', e)\n return entry",
"def ldap_login(self, username, password):\n if settings.LDAP_USERS and username not in settings.LDAP_USERS:\n logging.warning(f\"User {username} not allowed for LDAP login\")\n return False\n LDAP_SERVER = settings.LDAP_SERVER\n # Create fully qualified DN for user\n LDAP_DN = settings.LDAP_LOGIN_DN.replace(\"{username}\", username)\n logger.debug(f\"LDAP dn: {LDAP_DN}\")\n # disable certificate check\n ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)\n\n # specify certificate dir or file\n if settings.LDAP_CERT_DIR:\n ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, settings.LDAP_CERT_DIR)\n if settings.LDAP_CERT_FILE:\n ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, settings.LDAP_CERT_FILE)\n try:\n # build a client\n ldap_client = ldap.initialize(LDAP_SERVER)\n ldap_client.set_option(ldap.OPT_REFERRALS, 0)\n # perform a synchronous bind to test authentication\n ldap_client.simple_bind_s(LDAP_DN, password)\n logger.info(f\"User '{username}' successfully authenticated via LDAP\")\n ldap_client.unbind_s()\n return True\n except (ldap.INVALID_CREDENTIALS, ldap.NO_SUCH_OBJECT):\n ldap_client.unbind()\n logger.warning(\"LDAP: wrong username or password\")\n except ldap.SERVER_DOWN:\n logger.warning(\"LDAP server not available\")\n except ldap.LDAPError as e:\n if isinstance(e, dict) and \"desc\" in e:\n logger.warning(f\"LDAP error: {e['desc']}\")\n else:\n logger.warning(f\"LDAP error: {e}\")\n return False",
"def authenticate_userpass():\n return _userpwd_auth(current_app.config.get('VAULT_AUTH_PATH', 'userpass'))",
"def authenticate(self, username, password):\n auth = (username, password)\n res = requests.get(\n self.normalize_admin_url(\"authenticate\"),\n headers={\"user-agent\": self.u_agent},\n auth=auth,\n verify=False,\n )\n if res.status_code == 200:\n # authentication ok, keep authentication info for future use\n self.auth = auth\n return Response(0, \"Successfully logged in\")\n elif res.status_code == 401:\n try:\n val = res.json()\n except ValueError:\n val = \"Login credentials not accepted\"\n return Response(401, val)\n else:\n return Response(res.status_code, res.content)",
"def authenticate(credentials):",
"def authenticate(self, uri):\n return self.username, self.password",
"def auth(username='', pw='', session=''):\n if (len(username) > 0 and len(pw) > 0) or len(session) > 0:\n url = wwl.server(secure=True)\n f = dict()\n if len(username) > 0:\n f['username']=username\n f['pw']=pw\n else:\n f['session']=session\n form_data = urllib.urlencode(f)\n url = wwl.server() + '/users/auth'\n result = urllib2.urlopen(url, form_data)\n tt = result.read()\n return tt\n else:\n return ''",
"def authenticate(self, username: str, password: str) -> Optional[str]:",
"def authenticate(self, username: str, password: str) -> Optional[str]:",
"def authenticate(self):\n self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'])",
"def authenticate(self, username, password):\n user = self.db.get_user(username)\n print(user)\n\n if user is None:\n self.__deny_state()\n\n if not self.argon2.verify(user[1], password):\n self.__deny_state()\n\n self.__accept_state()",
"def authenticate(self, cred): \n auth_helper = AuthHelper.AuthHelper(self.context)\n return auth_helper.auth(cred)",
"def authenticate(self, username, password):\n return None",
"def get_auth(self, username, password):\n raise NotImplementedError()",
"def ldap_login(self, user_name, password) -> [bool, str]:\n # the following is the user_dn format provided by the ldap server\n # user_dn = \"uid=\" + username + \",ou=someou,dc=somedc,dc=local\"\n user_dn = f\"CN={user_name},CN=Users,DC=ms,DC=ds,DC=uhc,DC=com\"\n\n # adjust this to your base dn for searching\n connect = ldap.initialize(self.LDAP_SERVER, bytes_mode=False)\n search_filter = f'(&(uid={user_name}))'\n search_attr_list = ['displayName', 'memberOf']\n special_success = False\n try:\n # if authentication successful, get the full user datafuid\n connect.bind_s(user_dn, password)\n special_success = True\n result = connect.search_s(self.BASE_DISTINGUISHED_NAME, ldap.SCOPE_SUBTREE, search_filter, search_attr_list)\n print(\"result\", result)\n return self._allowed_access(connect, user_name)\n except Exception as err:\n print(\"authentication error\", err)\n if special_success and user_name in ['erospide', 'tmanni4']:\n # force success\n return True, user_name\n finally:\n connect.unbind_s()\n return False, ''",
"def simple_auth(self, name, password, user=None):\n if not self._is_initialized():\n return self._not_initialized_deferred()\n def _do_auth(res=None, state=\"entry\", user=None):\n if state == 'entry':\n d = self.get_user_record(name)\n d.addCallback(_do_auth, state=\"got_user\", user=None)\n elif state == 'got_user':\n if res is None:\n lg.debug(\"Authentication failed for user '%s': user\"\\\n \"does not exist\" %name)\n return AuthResult(AuthResult.INVALID_CREDENTIALS, name)\n d = self._get_password_credential(name,\n Directory.USER_PRINCIPAL)\n d.addCallback(_do_auth, state=\"got_credential\", user=res)\n elif state == 'got_credential':\n if res is None:\n lg.debug(\"Authentication failed for user '%s': user\"\\\n \"has no password set\" %name)\n return AuthResult(AuthResult.INVALID_CREDENTIALS, name)\n pwcred = res\n if pwcred.check_password(password):\n d = self.get_group_membership(\n Directory.USER_PRINCIPAL_GROUP, user.name)\n d.addCallback(_do_auth, state=\"got_groups\", user=user)\n else:\n lg.debug(\"Authentication failed for user '%s': invalid \"\\\n \"password\" %name)\n return AuthResult(AuthResult.INVALID_CREDENTIALS, name)\n elif state == 'got_groups':\n ret = AuthResult(AuthResult.SUCCESS, name)\n ret.groups = ret.groups | set(res)\n for grp in res:\n if self._nox_group_to_roles.has_key(grp):\n ret.nox_roles = ret.nox_roles | \\\n set((self._nox_group_to_roles[grp],))\n return ret\n else:\n raise Exception(\"Invalid State\")\n return d\n return _do_auth()",
"def authenticate(self):\n self.connection.authenticate()",
"def login_active():\r\n try:\r\n assert data.user is not None\r\n assert data.passwd is not None\r\n assert not '*' in data.user\r\n assert not '@' in data.user\r\n except:\r\n raise 'login information not set or are incorrect'\r\n\r\n try:\r\n print data.dc2\r\n l = init_ldap(data.user + data.usersuffix, data.passwd, data.dc2)\r\n login = (\r\n search_ldap(l, data.userFilter2 % data.user, data.baseDN2,\r\n attributes=['sAMAccountName'])\r\n )[0]\r\n except Exception as e:\r\n error(\"Active Directory access denied or user doesn't exists\")\r\n raise e\r\n\r\n warning('%s logged in to the Active Directory' % login['sAMAccountName'])\r\n \r\n## def __enter__():\r\n## return l\r\n## def __exit__():\r\n## l.unbind_s()\r\n l.__enter__ = lambda: l\r\n l.__exit__ = lambda x,y,z: l.unbind_s()\r\n return l",
"def get(self):\n\n\t\trequest = user_auth_parser.parse_args(strict=True)\n\n\t\tresult = Authenticator.authenticate(\n\t\t\trequest[\"username\"],\n\t\t\trequest[\"password\"]\n\t\t)\n\n\t\treturn result",
"def easyauth(cls, u, passwd):\n if u and all(key in u for key in ['username', 'salt', 'uhash']):\n return Account.authenticate(u['username'], passwd,\n u['salt'], u['uhash'])\n raise TypeError(\"Account._auth expects user object 'u' with \" \\\n \"keys: ['salt', 'uhash', 'username']. \" \\\n \"One or more items missing from user dict u.\")",
"def authenticate(username, password):\n user = User.find_by_username(username)\n if user and safe_str_cmp(user.password, password):\n return user",
"def authenticate(self, username, password):\n\n # call the server authenticate method\n result = self.client.service.authenticate(username, password)\n\n return result",
"def authenticate(self, password):\n request = self.request(Message.SERVERDATA_AUTH, unicode(password))\n with self.response_to(request) as response:\n if response.id == -1:\n raise AuthenticationError\n self.is_authenticated = True",
"def auth_authenticate():\n data = {'LoginName': username, 'Password': password}\n parameters = data_to_json(data)\n url = base_url + 'general/authentication/authenticate'\n response = make_request(url, parameters)\n r_value = ''\n if response['Status'] == 0:\n r_value = response['Value']['Token']\n return r_value",
"def authenticate(self):\n try:\n auth_header = self.basic_token\n username, password = decode(auth_header)\n\n user_principal = None\n allowlisted_users = Environment().get_allowlisted_users()\n if allowlisted_users is not None:\n password_from_allowlist = allowlisted_users.get(username)\n if password_from_allowlist is None or password_from_allowlist != password:\n logger.log_error(\"Invalid user credentials provided\")\n raise AuthenticationError(\"Invalid user credential\")\n else:\n raise AuthenticationError(\"No whitelisted users found to authenticate against\")\n\n if Environment().is_kerberos_enabled():\n user_principal = self.get_user_principal(username)\n key_tab_path = Environment().get_hdfs_keytab_file_path()\n logger.log_info(\"Minting a kerberos ticket for principal {} using keytab {}\".format(user_principal, key_tab_path))\n if key_tab_path is None or user_principal is None:\n raise AuthenticationError(\"Keytab file or kerberos principal missing\")\n returncode = KerberosUtil.renew_kinit(key_tab_path, user_principal)\n logger.log_info('kinit return code:' + str(returncode))\n\n return username, user_principal\n except Exception as e:\n logger.log_exception(\"Failed while authenticating user\", exc_info=True)\n raise AuthenticationError(str(e))",
"def authenticateFor(self, path=None, atts=None, basepath=None):\n if not path:\n path = self._formRequestedPath()\n\n # If the intended destination is a script, cache the inputs for our\n # for keeping during our trip to the login server\n self._cacheInputs()\n\n return_url = self._formURLbase(basepath) + path\n\n redirect = self.request.requestAuthentication(return_url, atts)\n self.cookie = self.createCookie(self.sessionid, self.realm)\n print >> self.estrm, \\\n \"Requesting authentication for session=%s\" % self.sessionid\n self.redirect(redirect, True)"
] | [
"0.71238625",
"0.7024816",
"0.68941444",
"0.68333524",
"0.682195",
"0.6764249",
"0.67410976",
"0.66589737",
"0.6557559",
"0.65375555",
"0.65371275",
"0.6479368",
"0.6479368",
"0.64316696",
"0.6416964",
"0.63971233",
"0.63791126",
"0.63758403",
"0.63447416",
"0.63050395",
"0.63032126",
"0.62415946",
"0.6236543",
"0.6221502",
"0.62060046",
"0.61926",
"0.6181668",
"0.61599594",
"0.61528206",
"0.61410415"
] | 0.8045247 | 0 |
Generate GCP JWT for Vault authentication. | def _generate_gcp_jwt():
role = current_app.config.get('VAULT_AUTH_ROLE')
account = current_app.config.get('VAULT_AUTH_ACCOUNT')
if role and account:
headers = {'Metadata-Flavor': 'Google'}
url = 'http://metadata/computeMetadata/v1/instance/service-accounts/' + account + '/identity'
try:
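            # the GCE metadata identity endpoint takes the audience and format as query parameters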
            params = {'audience': current_app.config.get('VAULT_URL') + '/vault/' + role, 'format': 'full'}
            resp = requests.get(url, headers=headers, params=params)
if resp.status_code != 200 and resp.status_code != 204:
current_app.logger.info('Vault: ' + resp.text)
raise Exception('Vault GCP Auth: Issues retrieving JWT.')
return resp.text
except ConnectionError as ConnError:
current_app.logger.info('Vault: There was an error while connecting to GCE metadata.')
raise RuntimeError('Vault: There was an error while connecting to GCE metadata.\n{}'.format(ConnError))
else:
raise RuntimeError('Vault Config: Role and Service Account not set.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generate_jwt_token(self):\n payload = jwt_payload_handler(self)\n token = jwt_encode_handler(payload)\n return token",
"def _generate_jwt_token(self):\n import jwt\n from datetime import datetime, timedelta\n from django.conf import settings\n\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'username': self.username,\n 'exp': int(dt.strftime('%s')),\n }, settings.SECRET_KEY, algorithm='HS256')\n # print(token)\n return token",
"def _generate_jwt_token(self):\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'exp': int(dt.strftime('%s'))\n }, settings.SECRET_KEY, algorithm='HS256')\n\n return token.decode('utf-8')",
"def get_signed_jwt(self, google_identity, scope):\n # Get the broker's service account email and access token\n broker_email, broker_token = self.get_broker_service_account_details()\n\n # Create a JWT\n iat = datetime.utcnow()\n exp = iat + timedelta(seconds=settings.JWT_LIFE)\n jwt = {\n 'scope': scope,\n 'aud': 'https://www.googleapis.com/oauth2/v4/token',\n 'iat': int(iat.timestamp()),\n 'exp': int(exp.timestamp()),\n }\n\n if self.broker_issuer:\n jwt['sub'] = google_identity\n jwt['iss'] = broker_email\n service_account = broker_email\n else:\n jwt['iss'] = google_identity\n service_account = google_identity\n\n # Sign the JWT\n response = requests.post(\n url='https://iam.googleapis.com/v1/projects/-/serviceAccounts/' + service_account + ':signJwt',\n headers={'Authorization': 'Bearer ' + broker_token},\n data={'payload': json.dumps(jwt)}\n )\n response = response.json()\n if 'error' in response:\n raise Exception(response['error']['message'])\n return response['signedJwt']",
"def generate_jwt(sa_keyfile,\n sa_email='[email protected]',\n audience='your-service-name',\n expiry_length=3600):\n\n now = int(time.time())\n\n # build payload\n payload = {\n 'iat': now,\n # expires after 'expirary_length' seconds.\n \"exp\": now + expiry_length,\n # iss must match 'issuer' in the security configuration in your\n # swagger spec (e.g. service account email). It can be any string.\n 'iss': sa_email,\n \"scope\": \"https://www.googleapis.com/auth/cloud-platform\",\n # aud must be either your Endpoints service name, or match the value\n # specified as the 'x-google-audience' in the OpenAPI document.\n 'aud': 'https://oauth2.googleapis.com/token',\n # sub and email should match the service account's email address\n 'sub': sa_email,\n 'email': sa_email\n }\n\n # sign with keyfile\n signer = google.auth.crypt.RSASigner.from_service_account_file(sa_keyfile)\n jwt = google.auth.jwt.encode(signer, payload)\n\n return jwt",
"def generate_jwt(self):\n\n # Generate a random token\n random_token = secrets.token_hex(12)\n\n # Update database\n self.user_in_db.update({'token': random_token})\n User.users_db.put(self.user_in_db)\n\n # Create timestamps for the token\n generated = time.time()\n expires = generated + TWO_WEEKS\n\n # Return the generated jwt\n return manage_tokens.encode({\n 'email': self.email,\n 'token': random_token,\n 'generated': generated,\n 'expires': expires,\n })",
"def to_jwt(self, key=None, algorithm=\"\", lev=0):\n _jws = JWS(self.to_json(lev), alg=algorithm, typ='JWT')\n return _jws.sign_compact(key)",
"def generateAuthToken(self):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=0, minutes=30),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256').decode()\n except Exception as error:\n print(error)\n return error",
"def create_jwt(self, audience: List[str], additional_claims=None) -> str:\n iat = time.time()\n exp = iat + self.lifetime\n payload = additional_claims or {}\n payload.update({'iss': self.credentials[\"client_email\"],\n 'sub': self.credentials[\"client_email\"],\n 'aud': audience,\n 'iat': iat,\n 'exp': exp,\n 'scope': ['email', 'openid', 'offline_access'],\n 'email': self.credentials[\"client_email\"]\n })\n additional_headers = {'kid': self.credentials[\"private_key_id\"]}\n token = jwt.encode(\n payload,\n self.credentials[\"private_key\"],\n headers=additional_headers,\n algorithm='RS256').decode()\n return token",
"def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'STULOGINID': self.STULOGINID, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')",
"def generate_token(self):\n\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=45),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_string = jwt.encode(\n payload,\n app.config.get('SECRET_KEY'),\n algorithm='HS256'\n )\n return jwt_string\n\n except Exception as exception:\n # return an error in string format if an exception occurs\n return str(exception)",
"def generate_token(self):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=100),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_bytes = jwt.encode(\n payload,\n os.environ.get('SECRET', 'test'),\n algorithm='HS256'\n )\n return jwt_bytes.decode('utf-8')\n except Exception as e:\n # return an error in string format if an exception occurs\n raise Exception(str(e))",
"def generate_jwt_token(private_pem: bytes, app_id: int) -> str:\n private_key = jwcrypto.jwk.JWK.from_pem(private_pem)\n payload = {\"iss\": app_id}\n duration = datetime.timedelta(minutes=10)\n return python_jwt.generate_jwt(payload, private_key, \"RS256\", duration)",
"def generate_jwt_key(self):\n return ''.join(random.choice(string.ascii_uppercase +\n string.digits +\n string.ascii_lowercase) for _ in range(50))",
"def create_jwt(user_obj):\n return jwt.encode(\n user_serializer.GetUserInfoSerializer(user_obj).data,\n settings.SECRET_KEY, algorithm='HS256').decode('utf-8')",
"def get_jwt() -> str:\n LOGGER.debug(\"Retrieving JWT...\")\n\n args = {\n \"url\": \"{0}/auth\".format(CONFIG['dojot']['url']),\n \"data\": json.dumps({\n \"username\": CONFIG['dojot']['user'],\n \"passwd\": CONFIG['dojot']['passwd'],\n }),\n \"headers\": {\n \"Content-Type\": \"application/json\"\n },\n }\n\n res = DojotAPI.call_api(requests.post, args)\n\n LOGGER.debug(\".. retrieved JWT\")\n return res[\"jwt\"]",
"def authenticate_gcp():\n role = current_app.config.get('VAULT_AUTH_ROLE')\n\n if role:\n url = '{}/v1/auth/{}/login'.format(current_app.config.get('VAULT_URL'),\n current_app.config.get('VAULT_AUTH_PATH', 'gcp'))\n if url.split('//')[0].lower() == 'https:':\n verify = current_app.config.get('VAULT_CA')\n else:\n verify = ''\n\n jwt = _generate_gcp_jwt()\n json = {\"role\": '{}'.format(role), \"jwt\": '{}'.format(jwt)}\n resp = requests.post(url, json=json, verify=verify)\n\n if resp.status_code != 200 and resp.status_code != 204:\n current_app.logger.info('Vault: ' + resp.json()['errors'][0])\n return resp.json()['errors'][0], None\n\n return resp.json()['auth']['client_token'], resp.json()['lease_duration']\n\n else:\n raise RuntimeError('Vault Config: Vault Role not set.')",
"def build_jwt(payload: dict) -> str:\n if 'sub' not in payload.keys():\n raise ValueError('sub not in payload keys')\n jwt_fields = {\n 'iss': JWT_DOMAIN,\n 'sub': None,\n 'iat': datetime.datetime.utcnow(),\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=JWT_EXPIRATION_MINUTES),\n **payload\n }\n return jwt.encode(jwt_fields, key=SECRET_KEY, json_encoder=JSONDataEncoder).decode(encoding='UTF-8')",
"def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'loginid': self.loginid, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')",
"def generate_token(user: dict):\n\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),\n 'iat': datetime.datetime.utcnow(),\n 'user': user\n }\n token = jwt.encode(\n payload,\n os.getenv('SECRET_KEY'),\n algorithm='HS256'\n )\n return token.decode('UTF-8')",
"def generate_auth_token(self):\n token = Serializer(\n app.config['API_SECRET_KEY'],\n expires_in=app.config['JWT_TOKEN_EXPIRATION']\n )\n return token.dumps({'id': self.id})",
"def generate_token(dictionary: dict, expiration: datetime.timedelta):\n\n dictionary['expiration'] = (datetime.datetime.utcnow() + expiration).timestamp()\n\n return jwt.encode(dictionary, current_app.config['TOKEN_SECRET_KEY'], algorithm='HS256')",
"def create_jwt(key, cert, systeminfo, metadata, requestdata):\n\n claims = {}\n claims[\"iat\"] = int(time.time())\n claims[\"systeminfo\"] = systeminfo\n claims[\"metadata\"] = metadata\n claims[\"requestdata\"] = requestdata\n\n logging.debug(\"Claims:{}\".format(json_encode(claims)))\n\n token = jwt.JWT(header=json_encode(jwt_header([cert])),\n claims=json_encode(claims))\n\n token.make_signed_token(key)\n\n return token.serialize()",
"def generate_token_string(token):\n if JWT_AUTH:\n return 'JWT {}'.format(token)\n else:\n return 'Token {}'.format(token)",
"def generate_token(self, user_id):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=10),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n # create the byte string encoded token using payload and SECRET key\n jwt_string = jwt.encode(\n payload,\n SECRET_KEY,\n algorithm='HS256'\n )\n return jwt_string\n except Exception as e:\n # return an error in string format if an exception occurs\n return str(e)",
"def make_id_jwt(sub, tenant=None):\n payload = {\"sub\": sub}\n if tenant is not None:\n payload[\"mender.tenant\"] = tenant\n payload = json.dumps(payload)\n payloadb64 = b64encode(payload.encode(\"utf-8\"))\n return \"bogus_header.\" + payloadb64.decode() + \".bogus_sign\"",
"def token_generate(self, user_id):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=200),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n encoded_token = jwt.encode(\n payload, current_app.config['SECRET_KEY'], algorithm='HS256'\n )\n return encoded_token\n\n except Exception:\n return str(Exception)",
"def token(self):\n return self._generate_jwt_token()",
"def token(self):\n return self._generate_jwt_token()",
"def token(self):\n return self._generate_jwt_token()"
] | [
"0.6614784",
"0.6614367",
"0.65258294",
"0.6461658",
"0.64035386",
"0.6360927",
"0.63380235",
"0.6322474",
"0.62901527",
"0.6256548",
"0.6229225",
"0.6217421",
"0.61592877",
"0.6156592",
"0.614825",
"0.6128425",
"0.60756314",
"0.60685194",
"0.6028773",
"0.60129017",
"0.5992166",
"0.5948027",
"0.59407574",
"0.5920836",
"0.591388",
"0.58893484",
"0.58846337",
"0.5857924",
"0.5857924",
"0.5857924"
] | 0.8443291 | 0 |
GCP JWT authentication function. | def authenticate_gcp():
role = current_app.config.get('VAULT_AUTH_ROLE')
if role:
url = '{}/v1/auth/{}/login'.format(current_app.config.get('VAULT_URL'),
current_app.config.get('VAULT_AUTH_PATH', 'gcp'))
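        # verify TLS against the configured CA bundle only when Vault is reached over HTTPS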
if url.split('//')[0].lower() == 'https:':
verify = current_app.config.get('VAULT_CA')
else:
verify = ''
jwt = _generate_gcp_jwt()
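        # Vault's GCP login expects the role name plus the signed instance identity JWT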
json = {"role": '{}'.format(role), "jwt": '{}'.format(jwt)}
resp = requests.post(url, json=json, verify=verify)
if resp.status_code != 200 and resp.status_code != 204:
current_app.logger.info('Vault: ' + resp.json()['errors'][0])
return resp.json()['errors'][0], None
return resp.json()['auth']['client_token'], resp.json()['lease_duration']
else:
raise RuntimeError('Vault Config: Vault Role not set.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def login():\n print(request.get_json())\n user = request.get_json()['username']\n passwd = request.get_json()['passwd']\n user_check = storage.get_user(User, user)\n if not user:\n return jsonify(message='missing value'), 401\n if not user_check:\n return jsonify(message='error'), 401\n if user == user_check.username and passwd == user_check.passwd:\n token = jwt.encode(\n {\n 'user_id': user_check.id,\n 'exp': datetime.utcnow() + timedelta(minutes=60)\n },\n current_app.config['SECRET_KEY']\n )\n token = token.decode('UTF-8')\n return jsonify(token=token), 200\n if user == user_check.username and passwd != user_check.passwd:\n return jsonify(message='authorization failed'), 403\n return jsonify(message='authorization failed'), 403",
"def login():\n data = request.get_json()\n user = User.authenticate(**data)\n\n if not user:\n return jsonify({ 'message': 'Invalid credentials', 'authenticated': False }), 401\n \n token = jwt.encode(\n {\n 'exp': datetime.now() + timedelta(minutes=90),\n 'iat': datetime.now(),\n 'sub': user.user_id\n },\n current_app.config['SECRET_KEY'],\n algorithm='HS256')\n #print(token)\n user_id = data['user_id']\n user = User.query.get(user_id)\n return jsonify({ 'user': user.to_dict(), 'token': token.decode('UTF-8') }), 200",
"def get_token(request):\n request_json = request.get_json()\n # response = dict()\n if request.authorization and 'password' in request.authorization and 'username' in request.authorization:\n pwd = request.authorization.get('password')\n user = request.authorization.get('username')\n if pwd == 'password':\n token = jwt.encode({\"user\": user,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=20)}, SECRET_KEY,\n algorithm=\"HS256\")\n\n return jsonify({'token': token})\n\n return make_response(\"basic login required\", 404, {\"www-authenticate\": \"basic login required\"})",
"def authenticate(credentials):",
"def jwt_auth(client):\n return JwtAuthActions(client)",
"def _generate_gcp_jwt():\n role = current_app.config.get('VAULT_AUTH_ROLE')\n account = current_app.config.get('VAULT_AUTH_ACCOUNT')\n\n if role and account:\n headers = {'Metadata-Flavor': 'Google'}\n url = 'http://metadata/computeMetadata/v1/instance/service-accounts/' + account + '/identity'\n try:\n data = [('audience', current_app.config.get('VAULT_URL') + '/vault/' + role), ('format', 'full')]\n resp = requests.post(url, headers=headers, data=data)\n\n if resp.status_code != 200 and resp.status_code != 204:\n current_app.logger.info('Vault: ' + resp.text)\n raise Exception('Vault GCP Auth: Issues retrieving JWT.')\n\n return resp.text\n\n except ConnectionError as ConnError:\n current_app.logger.info('Vault: There was an error while connecting to GCE metadata.')\n raise RuntimeError('Vault: There was an error while connecting to GCE metadata.\\n{}'.format(ConnError))\n\n else:\n raise RuntimeError('Vault Config: Role and Service Account not set.')",
"def get_token():\n req = request.get_json()\n username = str(req['username'])\n password = str(req['password'])\n if User.username_password_match(username, password):\n expiration_date = datetime.datetime.utcnow() + \\\n datetime.timedelta(seconds=100)\n token = jwt.encode({'exp': expiration_date}, app.config['SECRET_KEY'], algorithm='HS256')\n return token\n return Response('', 401, mimetype='application/json')",
"def auth_token_api():\n data = request.get_json()\n if not data:\n response = jsonify({\n 'success': False,\n 'message': 'Missing request body'\n })\n response.status_code = 422\n return response\n\n # process argument\n login_type = data.get('auth_type')\n email = data.get('email').strip().lower()\n password = data.get('password')\n\n if not login_type or login_type not in ['email']:\n response = jsonify({\n 'success': False,\n 'message': 'Invalid auth_type'\n })\n response.status_code = 422\n return response\n\n # email authentication\n elif login_type == 'email':\n if not email:\n response = jsonify({\n 'success': False,\n 'message': 'Must provide email when auth_type is \"email\"'\n })\n response.status_code = 422\n return response\n user = db.session.query(User).filter(User.email == email, User.deleted == False).one_or_none()\n if not user:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid email'\n })\n response.status_code = 403\n return response\n # check the user's password\n password_valid = check_password_hash(user.password, password)\n if not password_valid:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid password'\n })\n response.status_code = 403\n return response\n\n token = generate_auth_token(user_id=user.user_id)\n response = jsonify({\n 'success': True,\n 'token': token\n })\n response.status_code == '200'\n return response",
"def login():\n req = flask.request.get_json(force=True)\n username = req.get('username', None)\n password = req.get('password', None)\n user = guard.authenticate(username, password)\n ret = {'access_token': guard.encode_jwt_token(user)}\n return ret, 200",
"def auth():\n pass",
"def auth():\n pass",
"async def authenticate(self, request: web.Request) -> Dict[str, Any]:",
"def get_auth_token():\n headers = {\n 'Content-Type': 'text/plain;charset=UTF-8', }\n data = '{ \\\n \"auth\": { \\\n \"identity\": { \\\n \"methods\": [ \\\n \"password\" \\\n ], \\\n \"password\": { \\\n \"user\": { \\\n \"name\": \"zheng_zhao\", \\\n \"password\": \"ZhaoZheng0426\", \\\n \"domain\": { \\\n \"name\": \"hwstaff_y00465251\" \\\n } \\\n } \\\n } \\\n }, \\\n \"scope\": { \\\n \"project\": { \\\n \"id\": \"454add6b26d04f53ae5c593551acf1ff\" \\\n } \\\n } \\\n } \\\n }'\n\n r = requests.post('https://iam.cn-north-1.myhuaweicloud.com/v3/auth/tokens',\n headers=headers, data=data)\n\n # print(r.status_code)\n # print(r.headers)\n token = r.headers.get('X-Subject-Token')\n\n return token",
"def requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = get_token_auth_header()\n jsonurl = urlopen(\"https://\"+AUTH0_DOMAIN+\"/.well-known/jwks.json\")\n jwks = json.loads(jsonurl.read())\n unverified_header = jwt.get_unverified_header(token)\n rsa_key = {}\n for key in jwks[\"keys\"]:\n if key[\"kid\"] == unverified_header[\"kid\"]:\n rsa_key = {\n \"kty\": key[\"kty\"],\n \"kid\": key[\"kid\"],\n \"use\": key[\"use\"],\n \"n\": key[\"n\"],\n \"e\": key[\"e\"]\n }\n if rsa_key:\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=ALGORITHMS,\n audience=API_AUDIENCE,\n issuer=\"https://\"+AUTH0_DOMAIN+\"/\"\n )\n except jwt.ExpiredSignatureError:\n raise AuthError({\"code\": \"token_expired\",\n \"description\": \"token is expired\"}, 401)\n except jwt.JWTClaimsError:\n raise AuthError({\"code\": \"invalid_claims\",\n \"description\":\n \"incorrect claims,\"\n \"please check the audience and issuer\"}, 401)\n except Exception:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Unable to parse authentication\"\n \" token.\"}, 401)\n\n _request_ctx_stack.top.current_user = payload\n return f(*args, **kwargs)\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Unable to find appropriate key\"}, 401)\n return decorated",
"def requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = get_token_auth_header()\n jsonurl = urlopen(\"https://\"+AUTH0_DOMAIN+\"/.well-known/jwks.json\")\n jwks = json.loads(jsonurl.read())\n unverified_header = jwt.get_unverified_header(token)\n rsa_key = {}\n for key in jwks[\"keys\"]:\n if key[\"kid\"] == unverified_header[\"kid\"]:\n rsa_key = {\n \"kty\": key[\"kty\"],\n \"kid\": key[\"kid\"],\n \"use\": key[\"use\"],\n \"n\": key[\"n\"],\n \"e\": key[\"e\"]\n }\n if rsa_key:\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=ALGORITHMS,\n audience=API_AUDIENCE,\n issuer=\"https://\"+AUTH0_DOMAIN+\"/\"\n )\n except jwt.ExpiredSignatureError:\n raise AuthError({\"code\": \"token_expired\",\n \"description\": \"token is expired\"}, 401)\n except jwt.JWTClaimsError:\n raise AuthError({\"code\": \"invalid_claims\",\n \"description\":\n \"incorrect claims,\"\n \"please check the audience and issuer\"}, 401)\n except Exception:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Unable to parse authentication\"\n \" token.\"}, 401)\n\n _request_ctx_stack.top.current_user = payload\n return f(*args, **kwargs)\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Unable to find appropriate key\"}, 401)\n return decorated",
"def login(request):\n if request.method != 'POST':\n return JsonResponse(\n {\"detail\": 'Method {} not allowed.'.format(request.method)}, status=status.HTTP_405_METHOD_NOT_ALLOWED\n )\n username = request.POST['username']\n password = request.POST['password']\n if username == 'admin' and password == 'admin':\n payload = {\n \"username\": username,\n \"exp\": time.time() + 300,\n\n }\n token = encode(payload, JWT_KEY, algorithm=\"HS256\")\n return JsonResponse({\"access_token\": token}, status=status.HTTP_200_OK)\n else:\n return JsonResponse({\"detail\": 'Invalid username or password'}, status=status.HTTP_401_UNAUTHORIZED)",
"def authenticate(token: JWT) -> AuthResponse:\n payload = jwt.decode(token)\n return payload[\"policies\"], payload[\"access_key\"]",
"def log_in(jwt):\n return current_app.library_registry.admin_controller.log_in(jwt)",
"def login():\n req = request.get_json(force=True)\n username = req.get('username', None)\n password = req.get('password', None)\n user = guard.authenticate(username, password)\n ret = {'access_token': guard.encode_jwt_token(user)}\n return ret, 200",
"def login():\n request_data = request.get_json()\n\n if User.authenticate(request_data['username'], request_data['password']):\n expiration = datetime.datetime.now() + datetime.timedelta(minutes=20)\n token = jwt.encode(\n {'exp': expiration},\n app.config['SECRET_KEY'],\n algorithm='HS256'\n ).decode()\n return jsonify({'token': token}), 200\n\n return Response(\n json.dumps({'error': 'Invalid username / password'}),\n 400,\n mimetype='application/json'\n )",
"def auth_token(self):",
"def _v3_auth(self, url):\n body = {\n \"auth\": {\n \"identity\": {\n \"methods\": [\"password\"],\n \"password\": {\n \"user\": {\n \"domain\": {\n \"name\": self.domain\n },\n \"name\": self.user,\n \"password\": self.password\n }\n }\n },\n \"scope\": {\n \"project\": {\n \"domain\": {\n \"name\": self.domain\n },\n \"name\": self.project\n }\n }\n }\n }\n if not url.endswith('/'):\n url += \"/\"\n return self.post(url + 'auth/tokens', body)",
"def login():\n\n\n params = request.get_json()\n username = params.get('username', None)\n password = params.get('password', None)\n\n if not username:\n return jsonify({\"msg\": \"Missing username parameter\"}), Status.HTTP_BAD_REQUEST\n if not password:\n return jsonify({\"msg\": \"Missing password parameter\"}), Status.HTTP_BAD_REQUEST\n\n # TODO Check from DB here\n if (username == 'admin' or username == 'user') and password == 'admin':\n logger.info('Logged in %s', username)\n else:\n return jsonify({\"msg\": \"Bad username or password\"}), Status.HTTP_BAD_UNAUTHORIZED\n # Identity can be any data that is json serializable\n # TODO: rather than passing expiry time here explicitly, decode token on client side. But I'm lazy.\n ret = {'jwt': create_jwt(identity=username), 'exp': datetime.utcnow() + current_app.config['JWT_EXPIRES']}\n return jsonify(ret), 200",
"def require_authentication(f):\n def wrapper(*args, **kwargs):\n logger.info('Validating jwt')\n if request.method == 'POST':\n jwt_bearer = request.get_json()['jwt-bearer']\n logger.info(jwt_bearer)\n else:\n jwt_bearer = request.args['jwt-bearer']\n logger.info(jwt_bearer)\n if jwt_bearer:\n validate = requests.get(SERVICES['AUTHENTICATION']['VALIDATE'], params={'jwt': jwt_bearer}, headers={'Authorization':'Bearer ' + JWT}).json()\n if validate['ack'] == 'true':\n kwargs['service_name'] = validate['audience']\n return f(*args, **kwargs)\n return {'ack': 'false',\n 'msg': 'Authentication Requited.'}, 403\n return wrapper",
"def checkJWT(token, roles_to_check, uuid=\"\", auth_mode=None, return_resp=False):\n\n print (\". \"*50)\n\n # is_authorized = True\n\n ### set the collection to user\n mongoColl = mongoConfigColls['endpoints']\n log_app.debug(\"checkJWT / auth_mode : %s\", auth_mode )\n log_app.debug(\"checkJWT / roles_to_check : %s\", roles_to_check )\n\n if auth_mode and uuid != \"\" : \n\n if 'all' not in roles_to_check :\n\n ### retrieving the root_url for authentication in general given the AUTH_MODE\n root_auth_doc = mongoColl.find_one({'apiviz_front_uuid': uuid, 'field' : 'app_data_API_root_auth'})\n # log_app.debug(\"checkJWT / root_auth_doc : \\n%s\", pformat(root_auth_doc) )\n\n auth_url = root_auth_doc['root_url'][auth_mode]\n log_app.debug( \"checkJWT / auth_url : %s\", pformat(auth_url) )\n\n ### retrieving the root_url and args for authentication\n confirm_auth_doc = mongoColl.find_one({'apiviz_front_uuid': uuid, 'field' : 'app_data_API_user_auth'})\n confirm_rooturl = confirm_auth_doc['root_url']\n confirm_user_role_path = confirm_auth_doc['resp_fields']['user_role']['path']\n log_app.debug( \"checkJWT / confirm_user_role_path : %s\", confirm_user_role_path) \n\n confirm_basestring = auth_url + confirm_rooturl\n # log_app.debug( \"checkJWT / confirm_basestring : %s\", pformat(confirm_basestring) )\n \n confirm_options = confirm_auth_doc['args_options']\n confirm_token_arg = ''\n for arg in confirm_options : \n if arg['app_arg'] == 'authToken' : \n confirm_arg = '?{}={}'.format(arg['arg'], token)\n \n confirm_url = confirm_basestring + confirm_arg\n # log_app.debug( \"checkJWT / confirm_url : %s\", pformat(confirm_url) )\n\n ### send request to service and read response\n auth_response = requests.get(confirm_url)\n auth_response_status = auth_response.status_code\n log_app.debug( \"checkJWT / auth_response_status : %s\", auth_response_status )\n auth_response_data = auth_response.json()\n # log_app.debug( \"checkJWT / auth_response_data : \\n%s\", pformat(auth_response_data) )\n\n print (\". \"*50)\n\n if return_resp : \n # return full auth response\n return {\n 'auth_response_data' : auth_response_data,\n 'auth_response_status' : auth_response_status,\n 'confirm_auth_doc' : confirm_auth_doc,\n }\n\n else :\n ### get role to check value in response\n auth_response_user_role = getValueFromDictAndPathString(auth_response_data, confirm_user_role_path)\n log_app.debug( \"checkJWT / auth_response_user_role : %s\", auth_response_user_role) \n # return is_authorized\n return auth_response_user_role in roles_to_check\n \n else : \n return True\n\n else : \n return False",
"def LoginCheck():\n jwt_data = get_jwt()\n if jwt_data['roles'] != 'admin':\n return jsonify(msg=\"Permission denied\"), Status.HTTP_BAD_FORBIDDEN\n\n identity = get_jwt_identity()\n if not identity:\n return jsonify({\"msg\": \"Token invalid\"}), Status.HTTP_BAD_UNAUTHORIZED\n\n data = {\"msg\": \"Loggeed In\"}\n json_response = json.dumps(data)\n return Response(json_response,\n status=Status.HTTP_OK_BASIC,\n mimetype='application/json')",
"def get_auth_token():\n token = g.user.generate_auth_token(24*3600)\n return jsonify({'user_id': g.user.id, 'token': token.decode('ascii')})",
"def get_jwt() -> Tuple[str, int]:\n if current_user and current_user.is_authenticated:\n return jsonify(jwt=create_jwt_for_user(current_user)), 200\n\n credentials = request.get_json(force=True)\n if not credentials or 'email' not in credentials or 'password' not in credentials:\n return jsonify(error=\"Incomplete credentials.\"), 400\n\n user = user_datastore.find_user(email=credentials['email'])\n if verify_and_update_password(credentials['password'], user):\n jwt_json: str = jsonify(jwt=create_jwt_for_user(user))\n return jwt_json, 200\n\n jwt_error_json: str = jsonify(error=\"Could not authenticate user.\")\n return jwt_error_json, 401",
"def requires_auth(logger, key, algorithms = 'HS256'):\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n # get JWT token:\n token = get_token()\n # authentication:\n payload = decode_jwt(token, key, algorithms)\n except JWTError as e:\n # add to log:\n logger.error(e.error[\"description\"])\n # abort:\n abort(e.status_code, description=e.error[\"description\"])\n return f(payload, *args, **kwargs)\n return decorated_function\n return decorator",
"def get_auth_token_student():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})"
] | [
"0.6536674",
"0.6521387",
"0.64874554",
"0.6439283",
"0.64367366",
"0.64048326",
"0.63683885",
"0.63635916",
"0.6344219",
"0.6336104",
"0.6336104",
"0.6291958",
"0.62848336",
"0.62541",
"0.62541",
"0.62484086",
"0.6247485",
"0.62096065",
"0.6151544",
"0.60900754",
"0.608544",
"0.607672",
"0.6057407",
"0.60288",
"0.60214597",
"0.6013388",
"0.6008364",
"0.60060185",
"0.59967536",
"0.59594995"
] | 0.65679944 | 0 |
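Every negative in this row reduces to the same two operations: encode a short-lived token for an authenticated user, then decode and verify it on later requests. A minimal sketch of that shared pattern, assuming the PyJWT package and a placeholder secret key (neither is taken from any snippet above):

# Minimal sketch of the encode/decode pattern the snippets above share.
# SECRET_KEY is a placeholder, not a value from any row.
import datetime
import jwt

SECRET_KEY = "change-me"

def issue_token(username, ttl_seconds=300):
    payload = {
        "sub": username,
        "exp": datetime.datetime.utcnow() + datetime.timedelta(seconds=ttl_seconds),
    }
    # PyJWT 2.x returns a str; 1.x returned bytes, which is why some rows call .decode("ascii").
    return jwt.encode(payload, SECRET_KEY, algorithm="HS256")

def verify_token(token):
    # Raises jwt.ExpiredSignatureError / jwt.InvalidTokenError for bad tokens.
    return jwt.decode(token, SECRET_KEY, algorithms=["HS256"])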
Specified to add the potential partial solution to the info dict. | def expose_potential_partial_solution(self):
self._is_partial_solution_exposed = True
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_check_partials_yes(self):\n cp_save = dm.options['include_check_partials']\n dm.options['include_check_partials'] = True\n\n cases = [self.brach_explicit_partials,\n self.balanced_field_partials_radau,\n self.min_time_climb_partials_gl]\n\n partials = {}\n for c in cases:\n partials.update(c())\n\n dm.options['include_check_partials'] = cp_save\n\n assert len(partials.keys()) > 0",
"def partial_result(self, hyp):\n print(\"Partial:\"+hyp)",
"def partial_result(self, hyp, uttid):\n print \"Partial: \" + hyp",
"def add_solution(self, solution):\n if self.check_solution(solution):\n self._solution = solution\n self.solution_status = 'OK'\n else:\n self._solution = None\n self.solution_status = 'X'",
"def partial_result(self, hyp):\n rospy.logdebug(\"Partial: \" + hyp)",
"def test_check_partials_no(self):\n cp_save = dm.options['include_check_partials']\n dm.options['include_check_partials'] = False\n\n cases = [self.brach_explicit_partials,\n self.balanced_field_partials_radau,\n self.min_time_climb_partials_gl]\n\n partials = {}\n for c in cases:\n partials.update(c())\n\n dm.options['include_check_partials'] = cp_save\n\n # Only `phase.ode` should show up in in the partials keys.\n self.assertSetEqual(set(partials.keys()), {'phase0.ode'})",
"def test_ipam_vrfs_partial_update(self):\n pass",
"def has_solution(self) -> bool:\n pass",
"def test_load_extra_first():\n solution_repo = SolutionRepository(\n os.path.join(os.path.dirname(__file__), \"extra_only_solution.txt\")\n )\n assert solution_repo.solution[\"extra_only\"].metadata.name == \"extra_only\"\n\n solution_repo = SolutionRepository(\n os.path.join(os.path.dirname(__file__), \"extra_only_solution_no_extras.txt\")\n )\n assert solution_repo.solution[\"extra_only\"].metadata.name == \"extra_only\"",
"def RestrictionFullDeployment(self, alphaCompId, notInConflictCompsIdList):\n for j in range(self.nrVM):\n if self.solverTypeOptimize:\n bvars1 = [self.a[alphaCompId * self.nrVM + j]]\n bvars2 = [self.a[_compId * self.nrVM + j] for _compId in notInConflictCompsIdList]\n bvars3 = [self.a[i+j] for i in range(0, len(self.a), self.nrVM)]\n bvars = bvars1 + bvars2\n\n self.solver.add(Implies(PbGe([(x, 1) for x in bvars3], 1), PbEq([(x, 1) for x in bvars], 1)))\n\n else:\n self.solver.assert_and_track(\n (sum([If(self.a[alphaCompId * self.nrVM + j], 1, 0)] + [If(self.a[_compId * self.nrVM + j], 1, 0) for _compId in notInConflictCompsIdList])) ==\n (sum([If(self.a[i + j], 1, 0) for i in range(0, len(self.a), self.nrVM)]) >= 1),\n \"LabelFullDeployment: \" + str(self.labelIdx)\n )\n self.labelIdx += 1",
"def _add_hints(self, **hints):\n self._hints.update(hints)",
"def partial_a(self):\n return 1",
"def add_partial(self, f, tf=None, fidx=None,\n wind=None, nhop=None, \n t=None, idx=None):\n self.idx = idx",
"def notify_solution(self, sol):\n pass # pragma: no cover",
"def solve(self):\n pass",
"def solve(self):\n pass",
"def test_ipam_vlans_partial_update(self):\n pass",
"def test_client_tax_information_partial_update(self):\n pass",
"def infection_partial(self):\n return self.disease.infection_partial()",
"def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.add_landmarks\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.add_landmarks\"] = False\n\n EKFSLAM.EKFSLAM.add_landmarks(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.add_landmarks\"], \"The function uses the solution\"",
"def doParametersOfInterest(self):\r\n if self.fg4fixed:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0]\")\r\n self.modelBuilder.doVar(\"r[1,0,4]\")\r\n print \"Fixing CMS_zz4l_fg4\"\r\n poi = \"r\"\r\n else:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4\"):\r\n print \"have fg4 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0.,-1,1]\")\r\n poi = \"CMS_zz4l_fg4\"\r\n if self.cPOI:\r\n if self.modelBuilder.out.var(\"cww_zz\"):\r\n print \"have czz_ww inside\"\r\n else:\r\n self.modelBuilder.doVar(\"cww_zz[0.5,-10,10]\")\r\n poi += \",cww_zz\"\r\n\r\n if self.fg2POI:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2\"):\r\n print \"have fg2 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2[0.,0,1]\")\r\n poi += \",CMS_zz4l_fg2\"\r\n if self.muFloating:\r\n self.modelBuilder.doVar(\"r[1,0,2000]\")\r\n if self.muAsPOI:\r\n print \"Treating r as a POI\"\r\n poi += \",r\"\r\n else:\r\n self.modelBuilder.out.var(\"r\").setAttribute(\"flatParam\")\r\n if self.phiFloating:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\"):\r\n print \"have fg4phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-3.1415926,3.1415926]\")\r\n if self.phiPOI:\r\n poi += \",CMS_zz4l_fg4phi\"\r\n else:\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\").setAttribute(\"flatParam\")\r\n if self.phi2Floating:\r\n #self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-math.pi,math.pi]\")\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\"):\r\n print \"have fg2phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2phi[0.,-3.1415926,3.1415926]\")\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\").setAttribute(\"flatParam\")\r\n \r\n self.modelBuilder.doSet(\"POI\",poi)",
"def solution(self, solution: Path, bundle: Bundle):\n pass",
"def eq_in_evaluate_partial_derivative(self, eq, used_jacobian_vars):\n eq.setdefault('in_evaluate_partial_derivative', []).append(eq['sympy_lhs']\n in [v[0] for v in used_jacobian_vars\n if v[0] is not None])",
"def _core_fix_params(self,bx,phase,eqnid) :\n\t\tif self.ss.exptype == 'fullinfo' :\n\t\t\treturn bx\n\t\telif self.ss.exptype in ('noinfo','partialinfo') :\n\t\t\treg_fix = self._regressors_fix[eqnid] \n\t\t\tif phase == 1 : \n\t\t\t\tprod_fix = reg_fix['prod']\n\t\t\t\tfor varid in prod_fix : \n\t\t\t\t\tbx[varid] = 0.0\n\t\t\telif phase == 2 : \t\t\t\n\t\t\t\tdegrad_fix = reg_fix['degrad']\n\t\t\t\tfor varid in degrad_fix :\n\t\t\t\t\tbx[varid] = 0.0 \n\t\t\telse : \n\t\t\t\tself.logger.error(\"Incorrect phase %r\"%(phase))\n\t\t\t\tsys.exit(1)\n\t\telse :\n\t\t\tself.logger.error(\"Unrecognized exptype %s quitting...\"%\\\n\t\t\t(self.ss.exptype))\n\n\t\treturn bx",
"def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.h\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.h\"] = False\n\n EKFSLAM.EKFSLAM.h(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.h\"], \"The function uses the solution\"",
"def partial_result(self, hyp, uttid):\n rospy.logdebug(\"Partial: \" + hyp)",
"def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.update\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.update\"] = False\n\n EKFSLAM.EKFSLAM.update(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.update\"], \"The function uses the solution\"",
"def sketch_of_solution(self,sol=None):\n raise NotImplementedError",
"def test_client_risk_assessment_partial_update(self):\n pass",
"def get_solution(self):\n solution = self.raw_solution\n if solution is not None:\n return {\n \"solution\": self.raw_solution\n }"
] | [
"0.5336766",
"0.5204248",
"0.50944495",
"0.50701666",
"0.503315",
"0.5031378",
"0.49841353",
"0.49736056",
"0.4957122",
"0.493283",
"0.49248284",
"0.49117887",
"0.49109685",
"0.49100757",
"0.4907853",
"0.4907853",
"0.49010634",
"0.4893403",
"0.48574725",
"0.48571035",
"0.4852419",
"0.4845431",
"0.48286226",
"0.48269445",
"0.4826276",
"0.48218444",
"0.48201618",
"0.48146024",
"0.48136485",
"0.47937265"
] | 0.6785565 | 0 |
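The positive document is a one-line setter that flips a private flag; the flag only matters to whatever code later assembles the info dict. A hypothetical consumer of that flag, with every name other than _is_partial_solution_exposed invented for illustration:

# Hypothetical consumer of the flag set by expose_potential_partial_solution().
class PartialSolutionInfoMixin:
    def __init__(self):
        self._is_partial_solution_exposed = False

    def expose_potential_partial_solution(self):
        self._is_partial_solution_exposed = True

    def _build_info(self, partial_solution):
        info = {}
        if self._is_partial_solution_exposed:
            info["partial_solution"] = partial_solution
        return info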
Specified to add the full ground truth state to the info dict. | def add_ground_truth_state_to_info(self):
self._is_ground_truth_state_exposed = True
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __add_current_state_to_state_dict(self):\n board_fen = self.board_fen()\n if board_fen not in self.states:\n self.states[self.board_fen()] = GameState(self.board_array())",
"def update_truth(self, ground_truth):\n self.ground_truth = ground_truth",
"def __init__(self):\n super().__init__()\n self._supported[\"mixed_states\"] = True\n self._short_name = \"fock\"",
"def show_ground_feature():\n ground_description_int = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS]\n mvaddstr(16, 3, GROUND_DESCRIPTIONS.get(ground_description_int), color_pair(GROUND_FEATURES_COLOUR) | A_BOLD)",
"def extra_state_attributes(self) -> dict[str, Any]:\n return {\n \"zone_idx\": self._device.idx,\n \"heating_type\": self._device.heating_type,\n \"mode\": self._device.mode,\n \"config\": self._device.config,\n **super().extra_state_attributes,\n \"schedule\": self._device.schedule,\n \"schedule_version\": self._device.schedule_version,\n }",
"def state_information(self) -> Dict[str, Any]:\n raise NotImplementedError(\"Device subclass needs to implement this.\")",
"def extra_state_attributes(self):\n return {ATTR_DEVICE: \"SKYBEACON\", ATTR_MODEL: 1}",
"def extra_state_attributes(self):\n return {ATTR_DEVICE: \"SKYBEACON\", ATTR_MODEL: 1}",
"async def async_added_to_hass(self):\n # If not None, we got an initial value.\n await super().async_added_to_hass()\n state = await self.async_get_last_state()\n if not state:\n return\n self._state = state.state == \"on\"",
"async def async_added_to_hass(self):\n # If not None, we got an initial value.\n await super().async_added_to_hass()\n state = await self.async_get_last_state()\n \n if state is not None:\n\n if state.state is not None:\n self._attr_native_value = state.state\n self._attr_state = state.state\n self._hass.data[DOMAIN][get_gas_tariff_override_key(self._serial_number, self._mprn)] = self._attr_native_value\n \n self._attributes = {}\n for x in state.attributes.keys():\n self._attributes[x] = state.attributes[x]\n \n _LOGGER.debug(f'Restored OctopusEnergyPreviousAccumulativeGasCostTariffOverride state: {self._attr_state}')",
"def createEmptyLayer(self):\n # , wt.greeting: False , wt.ects: False, wt.preReqs: False, wt.courseCodeMentioned: False\n layer = {wt.questionWord: \"\", wt.pronoun: \"\", wt.verb: \"\", wt.websiteName: \"\", wt.timeWord: \"\", wt.about: \"\",\n wt.weather: \"\", wt.when: \"\", wt.keywords: [], wt.courseID: \"\", wt.structureUnitCode: \"\",\n wt.sentence: [], wt.hangman: \"\", wt.what: \"\"}\n return layer",
"def reset(self, env, last_info=None):\n super().reset(env)\n\n # Override with targets from last_info (and the goal image!).\n if self.goal_cond_testing:\n assert last_info is not None\n self.goal['places'] = self._get_goal_info(last_info)",
"def add_state(self):\n return self.fst.AddState()",
"async def async_added_to_hass(self):\n # If not None, we got an initial value.\n await super().async_added_to_hass()\n if self._state is not None:\n return\n\n state = await self.async_get_last_state()\n self._state = state and state.state == STATE_ON",
"def registerInitialState(self, gameState):\r\n\r\n '''\r\n Make sure you do not delete the following line. If you would like to\r\n use Manhattan distances instead of maze distances in order to save\r\n on initialization time, please take a look at\r\n CaptureAgent.registerInitialState in captureAgents.py.\r\n '''\r\n CaptureAgent.registerInitialState(self, gameState)\r\n\r\n '''\r\n Your initialization code goes here, if you need any.\r\n '''\r\n self.totalFoodNum = float(len(self.getFood(gameState).asList()))\r\n\r\n self.needFallback = False\r\n\r\n self.mapwidth = gameState.data.layout.width\r\n self.mapheight = gameState.data.layout.height\r\n self.myBorders = self.getMyBorder(gameState)\r\n self.enemyBorders = self.getEnemyBorder(gameState)\r\n # self.oneKi,self.twoKi,self.threeKi,self.fourKi = self.pointClassification(gameState)\r\n # self.dangerPath = self.getDangerPath(gameState)\r",
"def update(self) -> None:\n state = int(self._light.is_on())\n self._state = bool(state)\n self._brightness = to_hass_level(state)",
"def event_m20_11_4000010():\n \"\"\"State 0,2: [Lib] Character: Petrified: Appearance setting_SubState\"\"\"\n assert event_m20_11_x48(z76=5300, z77=0, z78=211000030, z79=0, z80=0, z81=4000000)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()",
"def extra_state_attributes(self):\n state_attrs = {}\n if self.latency is not None:\n state_attrs[\"latency\"] = self.latency\n return state_attrs",
"def __init__(self):\n super().__init__()\n self._supported[\"mixed_states\"] = True\n self._short_name = \"gaussian\"",
"def extra_state_attributes(self):\n return {\n ATTR_ATTRIBUTION: ATTRIBUTION,\n ATTR_STATION: self.probe.get_data(\"station_name\"),\n ATTR_UPDATED: self.probe.last_update.isoformat(),\n }",
"def state(self):\n pass",
"def extra_state_attributes(self) -> dict[str, Any]:\n return {\n \"heat_demand\": self._device.heat_demand,\n \"heat_demands\": self._device.heat_demands,\n \"relay_demands\": self._device.relay_demands,\n \"system_mode\": self._device.system_mode,\n \"tpi_params\": self._device.tpi_params,\n # \"faults\": self._device.faultlog,\n }",
"def extra_state_attributes(self) -> Mapping[str, str]:\n if (update_time := self.coordinator.data[\"last_update\"]) is not None:\n update_time = update_time.isoformat()\n return {\n ATTR_STATION: self.coordinator.data[\"Name\"],\n ATTR_UPDATED: update_time,\n }",
"def extra_state_attributes(self):\n attributes = {}\n if self._multiroom_group != []:\n attributes[ATTR_LINKPLAY_GROUP] = self._multiroom_group\n attributes[ATTR_GROUP_MEMBERS] = self._multiroom_group\n\n attributes[ATTR_MASTER] = self._is_master\n if self._slave_mode:\n attributes[ATTR_SLAVE] = self._slave_mode\n if self._media_uri_final:\n attributes[ATTR_MEDIA_CONTENT_ID] = self._media_uri_final\n if len(self._trackq) > 0:\n attributes[ATTR_TRCNT] = len(self._trackq) - 1\n if self._trackc:\n attributes[ATTR_TRCRT] = self._trackc\n if self._uuid != '':\n attributes[ATTR_UUID] = self._uuid\n\n attributes[ATTR_TTS] = self._playing_tts\n attributes[ATTR_SNAPSHOT] = self._snapshot_active\n attributes[ATTR_SNAPSPOT] = self._snap_spotify\n \n attributes[ATTR_MASS_POSITION] = self._mass_position\n\n if DEBUGSTR_ATTR:\n atrdbg = \"\"\n if self._playing_localfile:\n atrdbg = atrdbg + \" _playing_localfile\"\n\n if self._playing_spotify:\n atrdbg = atrdbg + \" _playing_spotify\"\n\n if self._playing_webplaylist:\n atrdbg = atrdbg + \" _playing_webplaylist\"\n\n if self._playing_stream:\n atrdbg = atrdbg + \" _playing_stream\"\n\n if self._playing_liveinput:\n atrdbg = atrdbg + \" _playing_liveinput\"\n\n if self._playing_tts:\n atrdbg = atrdbg + \" _playing_tts\"\n \n if self._playing_mediabrowser:\n atrdbg = atrdbg + \" _playing_mediabrowser\"\n \n if self._playing_mass:\n atrdbg = atrdbg + \" _playing_mass\"\n\n attributes[ATTR_DEBUG] = atrdbg\n\n if self._state != STATE_UNAVAILABLE:\n attributes[ATTR_FWVER] = self._fw_ver + \".\" + self._mcu_ver\n\n return attributes",
"def get_new_gamestate(self):",
"def __get_complete_state(self):\n\n # create a state with all objects and agents\n state = {}\n for obj_id, obj in self.__environment_objects.items():\n state[obj.obj_id] = obj.properties\n for agent_id, agent in self.__registered_agents.items():\n state[agent.obj_id] = agent.properties\n\n # Append generic properties (e.g. number of ticks, size of grid, etc.}\n state[\"World\"] = {\n \"nr_ticks\": self.__current_nr_ticks,\n \"curr_tick_timestamp\": int(round(time.time() * 1000)),\n \"grid_shape\": self.__shape,\n \"tick_duration\": self.tick_duration,\n \"world_ID\": self.world_ID,\n \"vis_settings\": {\n \"vis_bg_clr\": self.__visualization_bg_clr,\n \"vis_bg_img\": self.__visualization_bg_img\n }\n }\n\n return state",
"def state_info(self):\n raise NotImplementedError()",
"def set_defined(self):\n self._defined = 1",
"def get_state(self) -> Dict[str, Any]:\n return {\"aq_potential_num\": self.aq_potential_num, \"wq_potential_num\": self.wq_potential_num}",
"def update(self):\r\n if self._block.info_values is not None:\r\n self._state = self._block.info_values.get(self._sensor_name, None)"
] | [
"0.6266897",
"0.61687815",
"0.5671283",
"0.54962367",
"0.54451495",
"0.5404208",
"0.53883934",
"0.53883934",
"0.5335978",
"0.5324146",
"0.53026766",
"0.52942204",
"0.5273841",
"0.52662015",
"0.525729",
"0.52440625",
"0.5214837",
"0.5212029",
"0.5202931",
"0.5199911",
"0.5198641",
"0.5174509",
"0.51726806",
"0.5151344",
"0.5132851",
"0.51272076",
"0.5122904",
"0.5104485",
"0.510448",
"0.51034117"
] | 0.8061191 | 0 |
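As in the previous row, the matched document merely toggles a flag; in practice such a flag is read when an environment fills the info dict it returns from step(). A sketch under that assumption, with all names except _is_ground_truth_state_exposed invented:

# Hypothetical environment that honours the flag set by add_ground_truth_state_to_info().
class GroundTruthInfoEnv:
    def __init__(self):
        self._is_ground_truth_state_exposed = False
        self._ground_truth_state = None

    def add_ground_truth_state_to_info(self):
        self._is_ground_truth_state_exposed = True

    def step(self, action):
        observation, reward, done = self._advance(action)  # placeholder transition
        info = {}
        if self._is_ground_truth_state_exposed:
            info["ground_truth_state"] = self._ground_truth_state
        return observation, reward, done, info

    def _advance(self, action):
        return None, 0.0, False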
Determines the number of valleys that were walked through given a sequence of characters representing an altitudinal traversal ("U" stands for "up", "D" stands for "down"). | def counting_valleys0(s):
# . Let v_steps track consecutiveness
# . If positive to negative and if v_steps < 2, then
# increment v_steps
# . If negative to positive, then reset v_steps to 1
# and increment valleys
# . THINK: You can easily implement a mountains passed
# counter if you switch inequalities
prev_sl, cur_sl = 0, 0
v_steps, valleys = 0, 0
m_steps, mountains = 0, 0 # For fun
for c in s:
prev_sl = cur_sl
if c == "U": cur_sl += 1
elif c == "D": cur_sl -= 1
# For mountains (for fun)
if prev_sl <= 0 and cur_sl > 0 and m_steps < 2:
m_steps += 1
elif prev_sl > 0 and cur_sl <= 0:
m_steps = 0
mountains += 1
# For valleys
if prev_sl >= 0 and cur_sl < 0 and v_steps < 2:
v_steps += 1
elif prev_sl < 0 and cur_sl >= 0:
v_steps = 0
valleys += 1
#return (valleys, mountains)
return valleys | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def countingValleys(n, s):\n\n elevation = 0\n valleys = 0\n\n for char in s:\n if char == 'U':\n elevation +=1\n elif char == 'D':\n if elevation == 0:\n valleys += 1\n elevation -= 1\n\n return valleys",
"def count_ambig(curr_seq, valid_chars='ATCG'):\r\n up_seq = curr_seq.upper()\r\n total = 0\r\n for vchar in valid_chars:\r\n total += up_seq.count(vchar)\r\n return len(curr_seq) - total",
"def uracil_count(RNAsequence):\n uracil = 0\n for nucleotide in RNAsequence:\n if nucleotide == 'U':\n uracil += 1\n return uracil",
"def length_uc(x):\r\n return sum(length(m) for m in metamer(x))",
"def count_runlength_per_character(sequence):\n character_counts = defaultdict(list)\n current_character = None\n\n for character in sequence:\n if character != current_character:\n character_counts[character].append(1)\n else:\n character_counts[character][-1] += 1\n\n current_character = character\n\n return character_counts",
"def n_count(dna_string):\n a_count = 0\n c_count = 0\n g_count = 0\n t_count= 0\n for nuc in dna_string:\n if nuc.upper() == 'A':\n a_count += 1\n elif nuc.upper() == 'C':\n c_count += 1\n elif nuc.upper() == 'G':\n g_count += 1\n elif nuc.upper() == 'T':\n t_count += 1\n else:\n continue\n print(a_count, c_count, g_count, t_count)",
"def real_len(u, alt=False):\n if not isinstance(u, str):\n u = u.decode(\"utf8\")\n\n u = xenc(u) # Handle replacements of unsuported characters\n\n ueaw = unicodedata.east_asian_width\n\n if alt:\n # widths = dict(W=2, F=2, A=1, N=0.75, H=0.5) # original\n widths = dict(N=.75, Na=1, W=2, F=2, A=1)\n\n else:\n widths = dict(W=2, F=2, A=1, N=1, H=0.5)\n\n return int(round(sum(widths.get(ueaw(char), 1) for char in u)))",
"def vowelcount(s):\n s = s.lower()\n nv = 0\n for v in 'aeiou':\n nv += s.count(v)\n return nv",
"def count_unanimous_answer(g):\n chars = set(g.replace('\\n', ''))\n ppl = g.splitlines()\n unanimous = 0\n for c in chars:\n if all([c in p for p in ppl]):\n unanimous += 1\n return unanimous",
"def get_length(dna):\n return len (dna)",
"def count_umbiguous_bases(sequence):\n sequence = sequence.upper()\n amb = ['N', 'R', 'Y', 'W', 'S', 'K', 'M']\n return sum({base: sequence.count(base) for base in amb}.values())",
"def length_score( canvas ):\n score = 0\n for seqpos, nt in canvas.nucleotides.iteritems():\n if seqpos + 1 not in canvas.nucleotides.keys(): continue\n\n d = distance( nt, canvas.nucleotides[seqpos+1] )\n #print \"Distance between %d and %d is %f\" % (seqpos, seqpos+1, d)\n #score += harmonic_penalty( d, NT_DISTANCE, spring_constant )\n score += flat_harmonic_penalty( d, NT_MIN_DISTANCE, NT_MAX_DISTANCE, spring_constant )\n\n return score",
"def countsyllables_nlde(word):\r\n\tresult = 0\r\n\tprev_was_vowel = word[0] in VOWELS\r\n\tfor char in word[1:]:\r\n\t\tis_vowel = char in VOWELS\r\n\t\tif prev_was_vowel and not is_vowel:\r\n\t\t\tresult += 1\r\n\t\tprev_was_vowel = is_vowel\r\n\r\n\tif (len(word) > 1 and word[0] in VOWELS\r\n\t\t\tand word.endswith('e') and not word[-2] in VOWELS):\r\n\t\tresult += 1\r\n\treturn result or 1",
"def number_positives(seq):\n # Convert sequence to upper case\n seq = seq.upper()\n\n # Check for a valid sequence\n for aa in seq:\n if aa not in bootcamp_utils.aa.keys():\n raise RuntimeError(aa + ' is not a valid amino acid.')\n\n return seq.count('R') + seq.count('K') + seq.count('H')",
"def vowelcount(string):\n allvowels = [\"a\", \"A\", \"e\", \"E\", \"i\", \"I\", \"o\", \"O\", \"u\", \"U\"]\n vowelnumber = 0\n for letter in range(len(string)):\n for vowel in range(len(allvowels)):\n if string[letter] == allvowels[vowel]:\n vowelnumber += 1\n else:\n pass\n return vowelnumber",
"def vowels_count(s):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"3\")\n # END OF SOLUTION",
"def num_syllables(self, word):\n \"\"\"\n using the logic of vowel counting, count all vowels in the pronunciations\n \"\"\"\n dictionary = self._pronunciations;\n # check if word is present in the CMU dictionary\n if word in dictionary :\n word_pronunciations = dictionary[word.lower()]\n else :\n return 1\n \n vowels = ['A', 'E', 'I', 'O', 'U']\n \n ## find the shorter pronunciation for word\n shorter_arr = [];\n for pronunciation in word_pronunciations :\n if len(pronunciation) > len(shorter_arr) : shorter_arr = pronunciation\n \n num_length = 0\n \n for phoneme in shorter_arr :\n if phoneme[:1] in vowels : num_length += 1\n \n return num_length",
"def get_length(dna):\n return len(dna)",
"def count_all_characters_of_the_pyramid(characters):\n if characters:\n total = 0\n for i in range(len(characters)):\n total += count_visible_characters_of_the_pyramid(characters[i:])\n return total\n else:\n return -1",
"def codon_counts(self):\n # Removing 5' UTR and 3' UTR sequences\n sequence = self.sequence.replace(self.five_prime_utr_sequence, \"\").replace(self.three_prime_utr_sequence, \"\")\n return len(sequence) / 3",
"def tour_length(tour):\n return sum(distance(tour[i], tour[i-1]) for i in range(len(tour)))",
"def vowelcount(x):\n vowel = ['a', 'e', 'i', 'o', 'u']\n counter = 0\n\n for i in range(len(vowel)):\n a = x.count(vowel[i].lower())\n counter = counter + a\n a = x.count(vowel[i].upper())\n counter = counter + a\n\n return counter",
"def state(towers):\n ret = 0\n for i, row in enumerate(towers):\n for val in row:\n ret += i * 4**(val-1)\n return ret",
"def get_alts_in_hom_pileup(pileup_str, ref_base):\n alts = {'A':0, 'C':0, 'G':0, 'T':0}\n for base in pileup_str:\n if base != ref_base and base in alts.keys():\n alts[base] += 1\n\n return max(alts, key=alts.get), alts[max(alts, key=alts.get)]",
"def vowelcount(string):\n string = string.upper()\n i = string.count('A')\n i = i + string.count('E')\n i = i + string.count('I')\n i = i + string.count('O')\n i = i + string.count('U')\n i = i + string.count('Ä')\n i = i + string.count('Ö')\n i = i + string.count('Ü')\n print(string, \"contains \", i, \" vowels\")",
"def run_length(s):\n\n s = re.sub(\"[^a-zA-Z]\",\"\", s)\n s = s.lower()\n cnt = 1\n y = []\n z = []\n ch = ''\n for i in range(len(s)):\n if i + 1 < len(s) and s[i] == s[i + 1]:\n cnt += 1\n else:\n if cnt > 1:\n z.append(s[i-1])\n y.append(str(cnt))\n else:\n z.append(s[i])\n y.append(str(cnt))\n cnt = 1\n for i in range(len(y)):\n ch = ch + y[i] + z[i]\n i += 1\n print ch",
"def count_visible_characters_of_the_pyramid(characters):\n if characters:\n return (2 * len(characters) - 1) ** 2\n else:\n return -1",
"def problem(args:int) -> int:\r\n\ts = 0\r\n\tfor i in range(1, args + 1):\r\n\t\ts += letter_count(i)\r\n\treturn s",
"def at_frequency(self):\n result = str(self.seq).count(\"A\") + str(self.seq).count(\"T\")\n return result",
"def count_nucleic_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_nucleic_acids()\n return n"
] | [
"0.787841",
"0.6130476",
"0.60904425",
"0.6050508",
"0.591375",
"0.58611935",
"0.5833444",
"0.5750658",
"0.56289816",
"0.5620559",
"0.5612861",
"0.5600865",
"0.5549459",
"0.553956",
"0.5531055",
"0.5510508",
"0.55068606",
"0.55058426",
"0.54959077",
"0.5491007",
"0.5478747",
"0.54505116",
"0.5443276",
"0.5433053",
"0.5428219",
"0.541775",
"0.54147565",
"0.5410862",
"0.54007894",
"0.5395172"
] | 0.719043 | 1 |
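The matched document carries extra bookkeeping (v_steps, plus a mountain counter kept "for fun"); the valley count itself only needs the running altitude and the moments it climbs back to sea level from below. A compact equivalent for comparison, assuming the same "U"/"D" encoding:

def counting_valleys(s):
    # Count the climbs that bring the hiker back up to sea level from below.
    altitude = 0
    valleys = 0
    for step in s:
        if step == "U":
            altitude += 1
            if altitude == 0:   # just climbed out of a valley
                valleys += 1
        elif step == "D":
            altitude -= 1
    return valleys

# "UDDDUDUU" dips below sea level once and returns once: one valley.
assert counting_valleys("UDDDUDUU") == 1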
Returns a list of all tables relevant for a bind. | def get_tables_for_bind(self, bind=None):
return [table for table in list(self.Model.metadata.tables.values()) if table.info.get('bind_key') == bind] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tables(conn):\n cur = conn.cursor()\n cur.execute(\"\"\"SELECT name FROM sqlite_master\n WHERE type='table' AND name NOT LIKE 'sqlite_%';\n \"\"\")\n tables = cur.fetchall()\n\n return tables",
"def meta_db_tables(self) -> list:\r\n def _passer(**kwargs):\r\n data = self.engine.execute(\"\"\"\r\n SELECT * FROM sqlite_master WHERE type='table';\r\n \"\"\").fetchall()\r\n table_names = [i[1] for i in data]\r\n return table_names\r\n return self._connectionController(_passer)",
"def query_tables(self):\n # Find all tables\n tables_q = \"SELECT name FROM sqlite_master WHERE type = 'table' AND name NOT LIKE \\'sqlite_%\\';\"\n tables = self.query(tables_q)\n # print(tables)\n return tables",
"def get_tables():\n return execute(\"SELECT name FROM sqlite_master WHERE type = 'table';\")",
"def getTables(self):\n\treturn self.dbNames",
"def listOSWTables(conn):\n conn_cursor = conn.cursor()\n conn.text_factory = str\n res = conn.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables=[name[0] for name in res]\n return tables",
"def get_tables(self, db_name):\n pass",
"def show_tables(self):\n query = \"SELECT name FROM sqlite_master WHERE type = 'table'\"\n try:\n temp = self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n\n tables = []\n for x in temp:\n tables.append(x[\"name\"])\n del temp\n return tables",
"def get_tables(self):\n\t\tbuild = 'SELECT * FROM pg_catalog.pg_tables WHERE schemaname != \\'pg_catalog\\' AND schemaname != \\'information_schema\\';'\n\t\tself.cur.execute(build)\n\t\ttotal = self.cur.fetchall()\n\t\ttable_list = []\n\t\tfor a in total:\n\t\t\ttable_list.append(a[1])\n\t\treturn table_list",
"def list_tables(self):\n return LIST_TABLES(db=self.db)",
"def list_all_tables(db):\n # Get the tables which exist in the database\n db_tables = ex_sql_and_fetch(db, \"SELECT * FROM pg_catalog.pg_tables\")\n tables = [t[1] for t in db_tables]\n # Get the master tables from the Config\n config_tables = load_config()[db]['schemas'].keys()\n\n # Check to eliminate tables which don't exist from the Config\n relevant = [t for t in tables for c in config_tables if c in t]\n return relevant",
"def get_table_names(self):\n return self.engine.table_names()",
"def get_table_names(self,verbose=False):\n \n assert(self.connected)\n \n \n GET_TABLE_NAMES_COMMAND = \"SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '{0}'\".format(self.config['database'])\n \n self.cursor.execute(GET_TABLE_NAMES_COMMAND)\n \n tables = []\n for row in self.cursor:\n tables.append(row[0])\n \n return tables",
"def tables(self):\n result = self.execute(self.commands.get_tables(self.name))\n return [x[0] for x in result]",
"def tables(self):\n return Table.objects.filter(schema__database=self)",
"def print_all_tables(self):\n conn = self.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n print(cursor.fetchall())",
"def get_table_list(self, cursor):\n\n cursor.execute(\n \"\"\"\n SELECT c.relname, c.relkind\n FROM pg_catalog.pg_class c\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n WHERE c.relkind IN ('r', 'v', '')\n AND n.nspname = '%s'\n AND pg_catalog.pg_table_is_visible(c.oid)\"\"\"\n % get_current_schema().schema_name\n )\n\n return [\n TableInfo(row[0], {\"r\": \"t\", \"v\": \"v\"}.get(row[1]))\n for row in cursor.fetchall()\n if row[0] not in self.ignored_tables\n ]",
"def list_tables(self, context=\"MYDB\"):\n q = 'SELECT Distinct TABLE_NAME FROM information_schema.TABLES'\n res = self.quick(q, context=context, task_name='listtables', system=True)\n # the first line is a header and the last is always empty\n # also, the table names have \" as the first and last characters\n return list(res[\"TABLE_NAME\"])",
"def get_table_list(self):\n # the \\\"{{}}\\\" is where the sql command will be added via a second `.format()`\n container_command = \"docker exec {} sh -c \\\"{{}}\\\"\".format(self.mysql_container)\n sql_command = \"mysql {} --execute='SHOW TABLES FROM {};'\".format(self.mysql_credentials, self.database_name)\n table_list = self.shell(container_command.format(sql_command))\n table_list = table_list.split(\"\\n\")\n assert table_list[0] == \"Tables_in_{}\".format(self.database_name)\n return table_list[1:]",
"def tables(self) -> list:\n return self.list_tables()",
"def list_tables(self, **kwargs):\n cursor = self.execute(\n self.list_tables_sql, dict({\"database\": self.uri.database}, **kwargs)\n )\n return [row[0] for row in cursor.fetchall()]",
"def show_tables(db_name):\n output = execute_sql(db_name, \"SELECT name FROM sqlite_master WHERE type='table';\")\n return output",
"def list_tables(database):\n config = load_config()\n tables = [x for x in config[database]['schemas']]\n\n return tables",
"def getTableNames(self):\n\n # The specific command depends on whether we are using mysql or sqlite\n if self.connector == 'mysql':\n sqlcmd = (\"SELECT table_name FROM INFORMATION_SCHEMA.TABLES \" +\n \"WHERE table_schema='\" + self.dbname + \"'\")\n else:\n sqlcmd = \"SELECT name FROM sqlite_master WHERE type='table'\"\n\n self._c.execute(sqlcmd)\n tbnames = [el[0] for el in self._c.fetchall()]\n\n return tbnames",
"def show_tables(self, name_db):\n conn, cursor = SQLDatabase.connect()\n try:\n cursor.execute(\"SHOW TABLES FROM {}\".format(name_db))\n self.all_tables = [table[0] for table in cursor.fetchall()]\n except mysql.connector.errors.ProgrammingError as err:\n print(\"{} : {} --> unknown\".format(err, name_db))\n finally:\n SQLDatabase.close(cursor, conn)\n\n return self.all_tables",
"def get_my_tables(self):\n qnum = self.master('sql', att={'type': 'table'}) # it's a Table._call_() function call\n if self.run():\n return (self.table_factory(self.get_table_info(result[0])) for result in self.results[qnum])\n else:\n print('An error has occurred when initializing the database.')",
"def getTableNames(self):\n\tif not self.dbNames:\n\t # get db table names from DB\n\t if self.dbType==\"sqlite\":\n\t query=\"SELECT name FROM sqlite_master WHERE type='table';\"\n\t elif self.dbType==\"mysql\":\n\t query=\"SHOW TABLES\"\n\t self.startTxn(\"SQLUtil.__init__\")\n\t tup=self.fetchAll(query)\n\t self.endTxn(\"SQLUtil.__init__\")\n\t for item in tup:\n\t self.dbNames.append(item[0])\n\treturn self.dbNames",
"def getTAPTables(self):\n\t\treturn [r[\"tablename\"] for r in\n\t\t\tself.readerConnection.queryToDicts(\n\t\t\t\t\"select tablename from dc.tablemeta where adql\")]",
"def Fetch_All_Table_Names(self, d_params=None):\n ctx = self.__Connect_To_Snowflake(d_params)\n all_tables = ctx.cursor().execute(\"show tables\")\n ctx.close()\n return [x[1] for x in all_tables]",
"def table_names(self, cursor=None):\r\n return [kind.key().name() for kind in Query(kind='__kind__').Run()]"
] | [
"0.7201292",
"0.7104701",
"0.6972192",
"0.6968997",
"0.69183815",
"0.6867139",
"0.6861783",
"0.6858579",
"0.6854755",
"0.6749029",
"0.6615288",
"0.65660226",
"0.6531382",
"0.6527741",
"0.65209895",
"0.65070254",
"0.65040284",
"0.6495937",
"0.64294606",
"0.64141154",
"0.6405535",
"0.6380727",
"0.633716",
"0.6313131",
"0.62918025",
"0.6288351",
"0.62863725",
"0.6248762",
"0.6238043",
"0.6218799"
] | 0.8611967 | 0 |
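The matched document selects the tables whose info dict carries a matching 'bind_key', the convention Flask-SQLAlchemy-style binds use to route tables to different databases. A small standalone sketch of that convention with plain SQLAlchemy; the table and bind names are illustrative:

# Sketch of how info={'bind_key': ...} marks the tables that get_tables_for_bind() selects.
from sqlalchemy import Column, Integer, MetaData, Table

metadata = MetaData()

# Default bind: no bind_key in the table's info dict.
users = Table("users", metadata, Column("id", Integer, primary_key=True))
# Secondary bind: tagged with an explicit bind_key.
events = Table("events", metadata, Column("id", Integer, primary_key=True),
               info={"bind_key": "analytics"})

def tables_for_bind(metadata, bind=None):
    return [t for t in metadata.tables.values() if t.info.get("bind_key") == bind]

assert [t.name for t in tables_for_bind(metadata, "analytics")] == ["events"]
assert [t.name for t in tables_for_bind(metadata)] == ["users"]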
Load the current board state and win/loss record for the given phone from the db | def readBoard(phone):
cursor = cnx.cursor()
query = ("SELECT board,win,loss,draw FROM connectfour WHERE phone=%s")
cursor.execute(query, (phone,))
row = cursor.fetchone()
if row is None:
cursor.close()
cursor = cnx.cursor()
query = ("INSERT INTO connectfour (phone,created,updated) VALUES (%s,CURRENT_TIMESTAMP,CURRENT_TIMESTAMP) ON DUPLICATE KEY UPDATE phone=VALUES(phone)")
cursor.execute(query, (phone,))
cursor.close()
cnx.commit()
return (None,0,0,0)
else:
(state,win,loss,draw) = row
cursor.close()
board = Board()
if not(state is None):
board.load(state)
if win is None:
win = 0
if loss is None:
loss = 0
if draw is None:
draw = 0
return (board,win,loss,draw) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def saveBoard(phone,board,win,loss,draw):\r\n cursor = cnx.cursor()\r\n query = (\"UPDATE connectfour SET board=%s,win=%s,loss=%s,draw=%s,updated=CURRENT_TIMESTAMP WHERE phone=%s\")\r\n cursor.execute(query, (board.save(),win,loss,draw,phone))\r\n cursor.close()\r\n cnx.commit()",
"def load(self):\n data = self.db.select_single_row(\n \"SELECT board_class, width, height, tile_size FROM board\")\n self.board.columns = int(data[1])\n self.board.rows = int(data[2])\n self.board.tile_size = int(data[3])\n self.board._loaded_from_db = True\n self.board.switch_board(self.board)\n return self.board",
"def load_database(self):\n # If there is already data, do not load\n if self:\n raise DatabaseError('Data already loaded!')\n\n # Gather all data from the table\n data = self.cursor.execute(\n 'SELECT unique_id, name, wins, time_stamp, '\n 'last_win FROM gungame_winners'\n )\n data = data.fetchall()\n\n # Are there no winners to add?\n if not data:\n return\n\n # Loop through all the past winners and their data\n for unique_id, name, wins, time_stamp, last_win in data:\n\n # Add the current winner to the database\n instance = self[unique_id]\n instance.name = name\n instance.wins = int(wins)\n instance.time_stamp = float(time_stamp)\n instance.last_win = float(last_win)",
"def load_game(self):\n self.game = db.get_game(self.game_id)",
"def load_state_from_db(self, db_name):\n if not os.path.exists(db_name + '.sqlite'):\n click.secho(\"Provided database is not existent\", fg='red')\n return -1\n self.del_me()\n engine = create_engine('sqlite:///' + db_name + '.sqlite')\n Session = sessionmaker()\n Session.configure(bind=engine)\n session = Session()\n all_people = session.query(Persons).all()\n all_rooms = session.query(Rooms).all()\n\n for room in all_rooms:\n if room.room_type == \"LivingSpace\":\n room_type = 'L'\n elif room.room_type == \"OfficeSpace\":\n room_type = 'O'\n self.create_room(room.name, room_type)\n\n for person in all_people:\n self.add_person(person.name, person.email,\n person.person_type.upper(), person.wants_accomodation)\n self.all_persons[person.email].person_id = person.person_id\n\n if self.all_persons[person.email].person_type == \"Staff\":\n self.reallocate_person(\n self.staff[person.email], person.allocated_office)\n elif self.all_persons[person.email].person_type == \"Fellow\":\n self.reallocate_person(\n self.fellows[person.email], person.allocated_office)\n\n if person.wants_accomodation == \"Y\":\n self.reallocate_person(\n person.email, person.allocated_living)",
"def load_board(request):\n required_fields = ['user_id', 'game_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n if check_special_characters(str(data['user_id'])) or check_special_characters(str(data['game_id'])) \\\n or check_special_characters(str(data['token'])):\n return Response({'error': str('Unaccepted character passed!')},\n status=status.HTTP_400_BAD_REQUEST)\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Load the game from user's saved profile\n game_board = db.load_board(data['user_id'], data['game_id'])\n if not game_board:\n return Response({'error': 'you got some messed up arguments (NICK)'},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n game_board['game_id'] = str(uuid.uuid1())\n\n # indicate that this board is being loaded from the profile\n game_board['profile_load'] = True\n\n # Here I am just going to move this board to active games using the api we already have.\n # Note that board is still saved on user's profile, but we are just creating a new active game.\n response_status = game_utils.create_board_db(game_board)\n if response_status['error']:\n return Response({'error': response_status['reason']},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n active_game_id = response_status['game_id']\n\n # Here i am just going to return the game board itself from the active games\n # From this point front-end can continue using the Game Board API to interact with the game\n response_status = game_utils.load_board_db(active_game_id)\n if response_status['error']:\n return Response({'error': response_status['reason']},\n status=status.HTTP_400_BAD_REQUEST)\n\n # hide the UID used by data structure backend from user\n del response_status['game_board']['graph']['uid']\n\n return Response(response_status['game_board'])",
"def sync_with_database(self):\n # learn from db\n lports = self.nb_api.get_all(l2.LogicalPort)\n for lport in lports:\n port_id = \"{}:{}\".format(lport.lswitch.id, lport.id)\n self.cache_logical_port_by_port_id[port_id] = lport\n lrouters = self.nb_api.get_all(l3.LogicalRouter)\n for lrouter in lrouters:\n self.cache_logical_router_by_dpid[lrouter.id] = lrouter",
"def update_state_v1(self, dbsession, state):\n moving_player = self.active_player()\n board = self.update(state)\n table_game = dbsession.query(TableGame).filter(\n TableGame.game == board.id).first()\n table_board = TableBoard(\n board_state=dumps(tuple(map(tuple, board.board))),\n move_num=board._board.move_count,\n player=board.active_player(),\n game=board.id)\n if table_game: # TODO(grandquista)\n table_board.game_link.append(table_game)\n dbsession.add(table_board)\n if board:\n board.poke_player(False)\n return {'end': False}\n board.poke_player(True, moving_player)\n if board._board.has_kings():\n table_game.one_won = False\n table_game.two_won = False\n elif moving_player == table_game.player_one:\n table_game.two_won = False\n else:\n table_game.one_won = False\n board.close()\n return {'end': True}",
"def change(name, phone, db):\n database = load(db)\n if name in database:\n database[name] = phone\n pickle.dump(database, open(db, 'wb'))\n print(\"%s number changed to %r\" % (name, phone ))\n else:\n print(\"no such person %r in %r\" % (name, db))\n sys.exit(-1)",
"def load_status_table():",
"def database(self,id):\n\t\tdb = {1:('COMPUTER',1000.5,100),\n \t\t 2:('MOUSE',10.0,300),\n\t\t 3:('PENCIL BLUE',0.50,500),\n\t\t 4:('PENCIL RED',0.50,600),\n\t\t 5:('PENCIL WHITE',0.50,900),\n\t\t 6:('HEADPHONES',15.7,500),\n\t\t }\n\t\trow = (None,0.0,0)\n\t\ttry:\n\t\t\trow = db[id]\n\t\texcept:\n\t\t\tNone\n\t\treturn row",
"def add(name, phone, db):\n database = load(db)\n if name in database:\n print(\"%r already in %r\" % (name, db))\n sys.exit(-1)\n else:\n database[name] = phone\n database = OrderedDict(sorted(database.items()))\n pickle.dump(database, open(db, 'wb'))\n print(\"added '%s (%s)' to %r\" % (name, phone, db))",
"def db_update_entry():\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n name = get_name()\n if name in db:\n phone_number = get_phone_number(db[name.capitalize()])\n print(\"Updating existing entry ..... {name}\\n\".format(name=name))\n db[name.capitalize()] = phone_number\n db.sync()\n else:\n print_error()\n db.close()\n db_show_all()",
"def read_database(app):\n app.status.cursorToHourglass()\n app.central.closeAllSubWindows()\n app.database().scan()\n app.status.cursorToNormal() \n app.refresh()",
"def lobbyStage(self):\n # Organizes outbound data to clients into a dict\n print(\"(\" + str(self.HOST) + \", \" + str(self.PORT) +\"):: Starting lobby stage.\", file=self.logs)\n gameState = {\n \"connection\": str(self.PORT), \n \"ready\":False,\n \"start\":False,\n \"opponentPort\": None,\n }\n\n counter = 0\n while True:\n inboundData = self.socket.recvfrom(1024) # Gets bundle of data from clients\n data = inboundData[0] # Separates data from address\n \n ########\n self.bitsIn += sys.getsizeof(data)\n\n address = inboundData[1] # Separates address from data\n data = pickle.loads(data) # Unpickles data back into a python dict\n \n # Keeps track of how often the server recieves information from each client.\n updatedTime = time.time() \n self.clientUpdateTimes[str(address)] = updatedTime\n\n # If a new address connects, add it to the list of clients\n if address not in self.clients:\n self.clients.append(address)\n print(str(address)+ \":: New connection.\", file=self.logs)\n \n # If there are two players, the game is ready to start.\n if len(self.clients) == 2:\n gameState['ready'] = True \n for client in self.clients:\n if client != address:\n gameState['opponentPort'] = client \n \n if len(self.clients) == 1:\n gameState['ready'] = False\n gameState['opponentPort'] = None\n\n else:\n if data['command'] != \"\":\n print(str(address) +\"::\", data, file=self.logs) # Only prints out non-trivial data from clients \n \n # Handle commands from other servers\n if data['command'] == \"close\": # Ends the server\n break\n\n if data['command'] == 'ping': # Confirms connection to client servers\n print(\"(\" + str(address) +\")::\", self.clients, file=self.logs)\n \n if data['command'] == 'start':\n for client in self.clients: # Tells both player views to move on\n gameState['start'] = True # to the next stage\n outboundData = pickle.dumps(gameState)\n self.socket.sendto(outboundData, client)\n break\n\n # Packages up data and sends it back to the client\n outboundData = pickle.dumps(gameState)\n\n ######\n self.bitsOut += sys.getsizeof(outboundData)\n \n self.socket.sendto(outboundData, address)\n\n # Continuously saves logging information to a text file:\n self.logs.close()\n self.logs = open(str(self.filepath)+\"/_logs/\"+ str(self.PORT) + \".txt\", \"a+\")\n\n # Check client connections here\n self.checkClientConnections(time.time())",
"def save_board_state(self):\n self.board_states.append([copy.deepcopy(self.stock), copy.deepcopy(self.wp), \n copy.deepcopy(self.foundations), copy.deepcopy(self.tableaus)])",
"def loadFromBDD(self):\r\n query = \"SELECT star002_name,star002_laststation_star022station,star002_face,star002_coin,star002_iduser_star001,star002_zone_star011zone,star002_faction_star059 FROM star002_character WHERE star002_id = '\" + str(\r\n self.id) + \"'\"\r\n shimDbConnector.lock.acquire()\r\n instanceDbConnector = shimDbConnector.getInstance()\r\n cursor = instanceDbConnector.getConnection().cursor()\r\n cursor.execute(query)\r\n result_set = cursor.fetchall()\r\n for row in result_set:\r\n self.name = row[0]\r\n self.lastStation = row[1]\r\n self.face = row[2]\r\n self.coin = row[3]\r\n self.user = row[4]\r\n self.zoneId = row[5]\r\n self.faction = int(row[6])\r\n cursor.close()\r\n shimDbConnector.lock.release()\r\n self.loadMissionsFromBDD()\r\n self.loadShipFromBDD()",
"def load(self):\n db = CrawlDBI.DBI(dbtype='crawler')\n if self.rowid is not None:\n rows = db.select(table='checkables',\n fields=['rowid',\n 'path',\n 'type',\n 'cos',\n 'cart',\n 'ttypes',\n 'checksum',\n 'last_check',\n 'fails',\n 'reported'],\n where=\"rowid = ?\",\n data=(self.rowid,))\n else:\n rows = db.select(table='checkables',\n fields=['rowid',\n 'path',\n 'type',\n 'cos',\n 'cart',\n 'ttypes',\n 'checksum',\n 'last_check',\n 'fails',\n 'reported'],\n where=\"path = ?\",\n data=(self.path,))\n if 0 == len(rows):\n self.in_db = False\n elif 1 == len(rows):\n self.in_db = True\n rz = list(rows[0])\n self.rowid = rz.pop(0)\n self.path = rz.pop(0)\n self.type = rz.pop(0)\n self.cos = rz.pop(0)\n self.cart = rz.pop(0)\n self.ttypes = rz.pop(0)\n self.checksum = rz.pop(0)\n self.last_check = rz.pop(0)\n try:\n self.fails = rz.pop(0)\n except IndexError:\n self.fails = 0\n try:\n self.reported = rz.pop(0)\n except IndexError:\n self.reported = 0\n self.dirty = False\n else:\n raise StandardError(\"There appears to be more than one copy \" +\n \"of %s in the database\" % self)\n\n db.close()",
"def _load_state(self):\n \n if os.path.isfile(self.histFile):\n with open(self.histFile,'rb') as hf:\n oldFile = hf.read()\n \n if os.path.isfile(oldFile):\n self.dbFile.Value = oldFile",
"def loadDbFromDisk (self):\n currentThread=threading.currentThread()\n self._logIo(\"load-db-from-disk\").debug1(\"starting to load db. thread-id=%d\", currentThread.ident)\n\n startLoadingTime = time.time()\n if os.path.exists(self._dbFileFullNamePath):\n try:\n self._cidLastAccessTimeDict = a.infra.format.json.readFromFile(self._logIo, self._dbFileFullNamePath)\n self._logIo(\"read-db-from-disk\").debug1(\"loading db - reading from file time is: %.6f\", time.time() - startLoadingTime)\n startValidatingTime = time.time()\n self._scanLastAccessDictForJunk()\n self._logIo(\"validate-db\").debug1(\"loading db - validating data time is: %.6f\", time.time() - startValidatingTime)\n except Exception as ex:\n self._logIo(\"error-read-db-file\").error(\"error reading db file='%s'. exception: %s\", self._dbFileFullNamePath, ex)\n\n if os.path.exists(self._countersFileFullNamePath):\n try:\n countersData = a.infra.format.json.readFromFile(self._logIo, self._countersFileFullNamePath)\n \n # load prediction counters for presistency\n self.counters['numTotalPredictionRemovedBytes'] = countersData['numTotalPredictionRemovedBytes']\n except Exception as ex:\n self._logIo(\"error-read-counters-file\").error(\"error reading counters file='%s'. exception: %s\", self._countersFileFullNamePath, ex)\n self._logIo(\"read-db-from-disk\").debug1(\"loading db - total time is: %.6f\", time.time() - startLoadingTime)",
"def get_board(board_id):\n all_boards = [board for board in GRAPH_DB.find(\"board\")]\n board = filter(lambda b: b._id == board_id, all_boards)[0]\n return {\"ladders\": from_hackerrank_paths(board[\"ladders\"]),\n \"snakes\": from_hackerrank_paths(board[\"snakes\"])}",
"def reverse(phone, db):\n database = load(db)\n database = dict(zip(database.values(), database.keys()))\n\n if phone in database:\n print(\"%s (%s)\" % (database[phone], phone))\n else:\n print(\"no one found with number %r\" % phone)\n sys.exit(-1)",
"def load():\r\n global database\r\n global ranks\r\n \r\n osPlatform = (\"Windows\" if os.name == \"nt\" else \"Linux\" if os.name == \"posix\" else os.name)\r\n debug.write('Log file started at %s' % time.strftime(\"%A %d %B %Y - %H:%M:%S\"), 0, False)\r\n debug.write('\\n*******************************************************', 0, False)\r\n debug.write('[SourceRPG]: Turning your Server into a Role Playing Game', 0, False)\r\n debug.write('[SourceRPG]: Current Version - %s' % info.version, 0, False)\r\n debug.write('[SourceRPG]: Made by %s' % info.author, 0, False)\r\n debug.write('\\nSystem Info:', 0, False)\r\n debug.write('\\tOS: %s' % osPlatform, 0, False)\r\n debug.write('\\tEventscripts Version: %s' % es.ServerVar('eventscripts_ver'), 0, False)\r\n debug.write('\\tCorelib Version: %s' % es.ServerVar('es_corelib_ver'), 0, False)\r\n debug.write('\\tEventscript Tools Version: %s' % es.ServerVar('est_version'), 0, False)\r\n debug.write('\\tEventscripts Noisy: %s' % es.ServerVar('eventscripts_noisy'), 0, False)\r\n debug.write('\\tPopuplib version: %s' % popuplib.info.version, 0, False) \r\n \r\n cmdlib.registerSayCommand(\"rpgmenu\", sayCommands.mainMenu, \"Opens the rpg main menu\")\r\n cmdlib.registerSayCommand(\"rpgupgrade\", sayCommands.upgradeMenu, \"Opens the upgrade menu\")\r\n cmdlib.registerSayCommand(\"rpgsell\", sayCommands.sellMenu, \"Opens the sell menu\")\r\n cmdlib.registerSayCommand(\"rpghelp\", sayCommands.helpMenu, \"Opens the help menu\")\r\n cmdlib.registerSayCommand(\"rpgstats\", sayCommands.stats, \"Opens the stats menu for the user or another player\")\r\n cmdlib.registerSayCommand(\"rpgrank\", sayCommands.rank, \"Tells the player their rank or another player's rank\")\r\n cmdlib.registerSayCommand(\"rpgpopup\", sayCommands.togglePopup, \"Tells the player their rank or another player's rank\")\r\n cmdlib.registerSayCommand(\"rpgtop10\", sayCommands.top10, \"Sends the player the last updated top 10 scores\")\r\n \r\n es.server.cmd(\"exec sourcerpg/skill_loader.cfg\")\r\n \r\n es.server.cmd(\"exec sourcerpg/addon_loader.cfg\")\r\n \r\n skillConfig.write(True)\r\n skillConfig.execute(True, True)\r\n\r\n debug.write('[SourceRPG] Starting the popup creation', 0, False)\r\n\r\n \"\"\" Create the default popups which aren't unique to players \"\"\"\r\n rpgmenu = popuplib.easymenu(\"sourcerpg_rpgmenu\", \"_popup_choice\", popups.rpgmenu)\r\n rpgmenu.settitle(\"=== %s Menu ===\" % prefix)\r\n rpgmenu.addoption(1, \"Upgrade Skills\")\r\n rpgmenu.addoption(2, \"Sell Skills\")\r\n rpgmenu.addoption(3, \"RPG Help\")\r\n rpgmenu.addoption(4, \"RPG Stats\")\r\n rpgmenu.addoption(5, \"Reset Skills\")\r\n \r\n helpMenu = popuplib.easymenu('sourcerpg_help', '_popup_choice', popups.helpmenu)\r\n helpMenu.settitle('=== %s Help ===' % prefix)\r\n helpMenu.addoption(1, 'About SourceRPG')\r\n helpMenu.addoption(2, 'List of Commands')\r\n helpMenu.addoption(3, 'About SourceRPG Skills')\r\n helpMenu.addoption(4, 'Credit')\r\n helpMenu.submenu(10, \"sourcerpg_rpgmenu\")\r\n \r\n confirmation = popuplib.easymenu('sourcerpg_confirm', '_popup_choice', popups.confirm)\r\n confirmation.settitle(\"=== %s Reset Stats ===\" % prefix)\r\n confirmation.setdescription(\"\"\"Are you sure you want to remove\r\nyour skills? 
There is no chance\r\nor recovering them again!\"\"\")\r\n confirmation.addoption(True, \"Yes\")\r\n confirmation.addoption(False, \"No\")\r\n \r\n about = popuplib.create('sourcerpg_about')\r\n about.addline('=== About %s ===' % prefix)\r\n about.addline('-' * 30)\r\n about.addline('SourceRPG is a python coded mod')\r\n about.addline('for EventScripts 2+. It enables')\r\n about.addline('players to gain Levels, by gaining')\r\n about.addline('XP from certain events, such as')\r\n about.addline('planting the bomb, or killing')\r\n about.addline('another player. Each level gives')\r\n about.addline('%s Credits, which allows you to' % creditsReceived)\r\n about.addline('buy certain skills which aid you')\r\n about.addline('in killing other players.')\r\n about.addline('-' * 30)\r\n about.addline('->8. Back')\r\n about.addline('0. Cancel')\r\n about.submenu(8, 'sourcerpg_help')\r\n \r\n commandspopup = popuplib.create('sourcerpg_commands')\r\n commandspopup.addline(\"=== %s Commands ===\" % prefix)\r\n commandspopup.addline(\"-\" * 30)\r\n commandspopup.addline(\"rpghelp - displays the help menu\")\r\n commandspopup.addline(\"rpgmenu - displays the main menu\")\r\n commandspopup.addline(\"rpgrank - displays your RPG rank\")\r\n commandspopup.addline(\"rpgpopup - toggles on / off automatic popup display\")\r\n commandspopup.addline(\"rpgupgrade - upgrade skills\")\r\n commandspopup.addline(\"rpgsell - sell skills\")\r\n commandspopup.addline(\"rpgstats - display your stats\")\r\n commandspopup.addline(\"-\" * 30)\r\n commandspopup.addline(\"->8. Back\")\r\n commandspopup.addline(\"0. Cancel\")\r\n commandspopup.submenu(8, 'sourcerpg_help')\r\n \r\n creditmenu = popuplib.create('sourcerpg_creditmenu') \r\n creditmenu.addline('=== %s Credits ===' % prefix)\r\n creditmenu.addline('-' * 30)\r\n creditmenu.addline(info.author)\r\n creditmenu.addline(' Script Creator')\r\n creditmenu.addline(' ')\r\n creditmenu.addline('SumGuy14 and Murphey')\r\n creditmenu.addline(' Letting me use their Long Jump code')\r\n creditmenu.addline(' ')\r\n creditmenu.addline('SuperDave')\r\n creditmenu.addline(' He turned my failing SmogNade code into')\r\n creditmenu.addline(' a working code! Thank him for that skill.')\r\n creditmenu.addline(' ')\r\n creditmenu.addline('JoeyT2008 (Jordan Thomas)')\r\n creditmenu.addline(' Awesome scripter who made the database conversion')\r\n creditmenu.addline(' ')\r\n creditmenu.addline('EventScripts Community')\r\n creditmenu.addline(' Help and support, and such a good plugin.')\r\n creditmenu.addline('-' * 30)\r\n creditmenu.addline('8. Back')\r\n creditmenu.addline('0. Cancel')\r\n creditmenu.submenu(8, 'sourcerpg_help')\r\n \r\n debug.write('[SourceRPG] Popups created', 0, False)\r\n \r\n \r\n if int(turboMode):\r\n database = DATABASE_STORAGE_METHOD(\":memory:\")\r\n else:\r\n database = DATABASE_STORAGE_METHOD(databasePath)\r\n \r\n ranks = RankManager()\r\n \r\n \"\"\" If the script is loaded late then make sure all players are inserted \"\"\"\r\n if es.getplayercount():\r\n for player in es.getUseridList():\r\n players.addPlayer( player )\r\n \r\n es.server.queuecmd('mp_restartgame 1')\r\n\r\n if str( es.ServerVar('eventscripts_currentmap')):\r\n es_map_start({})\r\n\r\n \"\"\" If we want to save by intervals then create a repeat to save the database \"\"\"\r\n if str( saveType ) == \"intervals\":\r\n gamethread.delayedname(float(saveLength), 'sourcerpg_databasesave', saveDatabase)\r\n \r\n debug.write('[SourceRPG]: Finished Loading... 
Enjoy your stay!', 0, False)\r\n debug.write('*******************************************************\\n', 0, False)",
"def get_new_gamestate(self):",
"def loadBoard(self, boardName):\n\n\t\twith open(boardName+'.pkl', 'rb') as input:\n\t\t\tinputPickle = pickle.load(input)\n\t\t\thouseList = inputPickle.houseList\n\t\t\tbatteryList = inputPickle.batteryList\n\t\treturn houseList, batteryList",
"async def load_state(self):\n\n\t\twith open(os.path.join(\"config\", \"leaderboards.json\"), \"r+\") as leaderboards:\n\t\t\tself.leaderboards = json.loads(leaderboards.read())",
"def read_db(self):\n with open(self.filename, 'r') as database:\n data = json.load(database)\n self.data = data",
"def load_game(self):\n print('Game loaded!')\n return pickle.load(open(\"save.dat\", 'rb'))",
"def _load(self) -> None:\n self.record = self._saved_record\n self.counter = self._saved_counter\n self.current_objects = self._saved_objects",
"def new_game(self):\n\n self.board = {}"
] | [
"0.6271187",
"0.6174814",
"0.588046",
"0.5659565",
"0.5567539",
"0.55583787",
"0.5470912",
"0.53004444",
"0.5197919",
"0.5100611",
"0.5088175",
"0.5068241",
"0.50355256",
"0.5034419",
"0.50063074",
"0.49890283",
"0.49616575",
"0.4892639",
"0.4892557",
"0.48855442",
"0.48751444",
"0.48686203",
"0.48653716",
"0.48525947",
"0.48393083",
"0.4838797",
"0.48338577",
"0.48316517",
"0.47998416",
"0.47836825"
] | 0.75203294 | 0 |
Save the current board state and winloss record for the given phone to the db | def saveBoard(phone,board,win,loss,draw):
cursor = cnx.cursor()
query = ("UPDATE connectfour SET board=%s,win=%s,loss=%s,draw=%s,updated=CURRENT_TIMESTAMP WHERE phone=%s")
cursor.execute(query, (board.save(),win,loss,draw,phone))
cursor.close()
cnx.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readBoard(phone):\r\n cursor = cnx.cursor()\r\n query = (\"SELECT board,win,loss,draw FROM connectfour WHERE phone=%s\")\r\n cursor.execute(query, (phone,))\r\n row = cursor.fetchone()\r\n if row is None:\r\n cursor.close()\r\n cursor = cnx.cursor()\r\n query = (\"INSERT INTO connectfour (phone,created,updated) VALUES (%s,CURRENT_TIMESTAMP,CURRENT_TIMESTAMP) ON DUPLICATE KEY UPDATE phone=VALUES(phone)\")\r\n cursor.execute(query, (phone,))\r\n cursor.close()\r\n cnx.commit()\r\n return (None,0,0,0)\r\n else:\r\n (state,win,loss,draw) = row\r\n cursor.close()\r\n board = Board()\r\n if not(state is None):\r\n board.load(state)\r\n if win is None:\r\n win = 0\r\n if loss is None:\r\n loss = 0\r\n if draw is None:\r\n draw = 0\r\n return (board,win,loss,draw)",
"def save_board_state(self):\n self.board_states.append([copy.deepcopy(self.stock), copy.deepcopy(self.wp), \n copy.deepcopy(self.foundations), copy.deepcopy(self.tableaus)])",
"def save_state(self):\n # add (turn number, active player, player 1, player 2) to game history\n # player 1 and player 2 contain data about active mods\n turn_number = self.turn_number\n player_1 = Class.copy_monster(self.player1)\n player_2 = Class.copy_monster(self.player2)\n # save which player's turn it is\n if self.current_player == self.player1:\n active_player = 'player 1'\n else:\n active_player = 'player 2'\n\n # add this information to history list\n self.history.append((turn_number, active_player, player_1, player_2))",
"def saveBoard(self, houseList, batteryList, boardName, width, height):\n\n\t\tboard = Board.board()\n\t\tboard.batteryList = batteryList\n\t\tboard.houseList = houseList\n\t\tboard.height = height\n\t\tboard.width = width\n\t\tboard.n_houses = len(houseList)\n\t\tboard.n_batteries = len(batteryList)\n\t\twith open(boardName+'.pkl', 'wb') as output:\n\t\t\tpickle.dump(board, output, pickle.HIGHEST_PROTOCOL)\n\t\t\t# pickle.dump(batteryList, output, pickle.HIGHEST_PROTOCOL)\n\t\treturn True",
"def save_board_to_db(ladders, snakes):\n new_board = GRAPH_DB.create(node(snakes=to_hackerrank_paths(snakes),\n ladders=to_hackerrank_paths(ladders)))[0]\n new_board.add_labels(\"board\")\n return new_board._id",
"def update_state_v1(self, dbsession, state):\n moving_player = self.active_player()\n board = self.update(state)\n table_game = dbsession.query(TableGame).filter(\n TableGame.game == board.id).first()\n table_board = TableBoard(\n board_state=dumps(tuple(map(tuple, board.board))),\n move_num=board._board.move_count,\n player=board.active_player(),\n game=board.id)\n if table_game: # TODO(grandquista)\n table_board.game_link.append(table_game)\n dbsession.add(table_board)\n if board:\n board.poke_player(False)\n return {'end': False}\n board.poke_player(True, moving_player)\n if board._board.has_kings():\n table_game.one_won = False\n table_game.two_won = False\n elif moving_player == table_game.player_one:\n table_game.two_won = False\n else:\n table_game.one_won = False\n board.close()\n return {'end': True}",
"def change(name, phone, db):\n database = load(db)\n if name in database:\n database[name] = phone\n pickle.dump(database, open(db, 'wb'))\n print(\"%s number changed to %r\" % (name, phone ))\n else:\n print(\"no such person %r in %r\" % (name, db))\n sys.exit(-1)",
"def save_db(self) -> None:",
"def save_board(request):\n required_fields = ['user_id', 'game_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # check for not allowed characters\n if check_special_characters(str(data['user_id'])) or check_special_characters(str(data['game_id'])) \\\n or check_special_characters(str(data['token'])):\n return Response({'error': str('Unaccepted character passed!')},\n status=status.HTTP_400_BAD_REQUEST)\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Load the game board from the database\n response_status = game_utils.load_board_db(data['game_id'])\n if response_status['error']:\n return Response({'error': response_status['reason']},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n board = response_status['game_board']\n\n # Here append new game board to user's profile. Note that this board already have an ID.\n if not db.save_game(data['user_id'], board):\n return Response({'error': str('Error when saving the game!')}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})",
"def save(cls):\n logger.info(\"Saving phone messages\")\n messages = Service.get('Messages').messages\n data = [c.to_dict() for c in messages if isinstance(c, PhoneMessage)]\n Persistance('messages/phone').save(data)\n yield None",
"def saveDatabase():\r\n debug.write(\"saveDatabase processing\", 1)\r\n \"\"\" Only process if turbo mode is off \"\"\"\r\n if not currentTurboMode:\r\n debug.write(\"turbo mode off, process the save\", 1)\r\n \"\"\" Update all the player's stats gained and commit the database\"\"\"\r\n for player in players:\r\n debug.write(\"Commiting indivudal players to the virtual database: %s\" % player.name, 2)\r\n player.commit()\r\n debug.write(\"Attempting to save the database itself\", 1)\r\n database.save()\r\n debug.write(\"SQLite database saved\", 1)\r\n debug.write(\"Creating the event\", 1)\r\n \"\"\" Create and fire the event \"\"\"\r\n values = {\"type\":(\"setstring\", str(saveType))}\r\n gamethread.delayed(0, fireEvent, (\"sourcerpg_databasesaved\", values))\r\n debug.write(\"Event fired\", 1)\r\n \r\n \"\"\" Create a loop if we need to \"\"\"\r\n if str( saveType ) == \"intervals\":\r\n gamethread.delayedname(float(saveLength), 'sourcerpg_databasesave', saveDatabase)\r\n debug.write(\"saveDatabase processed\", 1)",
"def save(self, db):\n pass",
"def game_over(self, won=True):\n if won is True:\n self.game[\"game_status\"] = self.WON\n else:\n self.game[\"game_status\"] = self.DISCONNECTED\n db.save_game(self.game_id, self.game)",
"def save_state():\n logger.debug(\"called\")\n pwd_gate.save()\n preferences.save()\n shareBuffer.save()\n contacts.save()\n secrets.save()",
"def writeState(self, saveState: ghidra.framework.options.SaveState) -> None:\n ...",
"def db_update_entry():\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n name = get_name()\n if name in db:\n phone_number = get_phone_number(db[name.capitalize()])\n print(\"Updating existing entry ..... {name}\\n\".format(name=name))\n db[name.capitalize()] = phone_number\n db.sync()\n else:\n print_error()\n db.close()\n db_show_all()",
"def save(self, db):\n db.query(\n \"INSERT INTO rooms (name, type) VALUES(:name, :type)\",\n name=self.name, type='O'\n )",
"def record(self, game):\n\n\t\topp = game.opponents[self]\n\t\tself.memory[opp] = game.get_last_move(opp)\n\t\tself.movesThisGen.append(game.get_last_move(self))\n\t\tif len(self.memory.keys()) > 10:\n\t\t\tself.memory.popitem()\n\t\tself.games_played += 1",
"def save_model(self, request, obj, form, change):\n setNewStatues(obj.id)\n # try:\n # oldDetail = models.StatusDetails.objects.get(hostID=obj.id, isTail=1)\n # except :\n # oldDetail=None\n # if oldDetail !=None:\n # oldDetail.isTail = False\n # oldDetail.save()\n obj.save()\n if obj.status!=None:\n newInfo=user.models.StatusDetails.objects.create(statu_id=obj.status.code,time=datetime.now(),hostID_id=obj.id,code=user.models.StatusDetails.objects.filter(hostID=obj.id).count()+1)",
"def record(self, game):\n\n\t\topp = game.opponents[self]\n\t\tself.memory[opp.tag] = game.get_last_move(opp)\n\t\tself.movesThisGen.append(game.get_last_move(self))\n\t\tself.games_played += 1",
"def store_word_state(pid, sentence_number, word_number):\n try:\n #search for an existing state for the pid\n query = {'pid': pid}\n cursor = database['WordsState'].find_one(query)\n if cursor is None:\n database.WordsState.insert_one({'pid': pid, 'sentence_number': sentence_number, 'word_number': word_number})\n else:\n database.WordsState.update_one(query, {'$set': {'pid': pid, 'sentence_number': sentence_number, 'word_number': word_number}})\n return {'status': 1, 'data': None}\n except Exception as e:\n return {'status': -1, 'data': str(e)}",
"def saveGame(self) -> None:\n self.state[\"phase\"] = self._phase\n\n state_as_string = json.dumps(self.state)\n with open(self.save_location, \"w\") as File:\n File.write(state_as_string)",
"def save():",
"def save(self, db):\n db.query(\n \"INSERT INTO rooms (name, type) VALUES(:name, :type)\",\n name=self.name, type='L'\n )",
"def record_tie(cls):\n d = {}\n for row in range(0, cls.size):\n for col in range(0, cls.size):\n token = cls.board[row][col]\n d[token] = token\n if cls.winner is None and cls.empty not in list(d.keys()):\n cls.winner = 'Tie'",
"def make_save(self):\n\t\tsave = {}\n\t\tsave['p'] = self.p\n\t\tsave['injail'] = self.injail.copy()\n\t\tsave['tile'] = self.tile.copy()\n\t\tsave['bal'] = self.bal.copy()\n\t\tsave['goojf'] = self.goojf.copy()\n\t\tsave['isalive'] = self.isalive.copy()\n\t\tsave['jailturn'] = self.jailturn.copy()\n\t\tsave['ownedby'] = self.ownedby.copy()\n\t\tsave['numhouse'] = self.numhouse.copy()\n\t\tsave['ismortgaged'] = self.ismortgaged.copy()\n\t\tsave['num'] = self.num\n\t\tsave['numalive'] = self.numalive\n\t\tsave['uid'] = self.uid.copy()\n\t\tsave['freeparkingsum'] = self.freeparkingsum\n\t\tself.autosave = save",
"def add(name, phone, db):\n database = load(db)\n if name in database:\n print(\"%r already in %r\" % (name, db))\n sys.exit(-1)\n else:\n database[name] = phone\n database = OrderedDict(sorted(database.items()))\n pickle.dump(database, open(db, 'wb'))\n print(\"added '%s (%s)' to %r\" % (name, phone, db))",
"def save_database(app):\n app.database().save()\n app.status.message('Finished saving..')",
"def _save_state(self):\n with open(self.histFile,'wb') as hf:\n hf.write(self.dbFile.Value)",
"def save(self, game):\n try:\n with open(self.filename, mode='w+') as file:\n # First char in the file is the next player\n file.write(game.next_player)\n # Then the board as a string of 64 characters\n file.write(str(game.board))\n\n except IOError as err:\n print(f\"Error saving file: {err}\")"
] | [
"0.6470638",
"0.6128905",
"0.5942916",
"0.58818704",
"0.579189",
"0.5678843",
"0.5672766",
"0.5644867",
"0.56161314",
"0.5545671",
"0.5525992",
"0.5502162",
"0.5501767",
"0.54918563",
"0.54908663",
"0.5488081",
"0.54879624",
"0.5451699",
"0.54198986",
"0.5358391",
"0.5348812",
"0.53302336",
"0.53283215",
"0.53165865",
"0.53107905",
"0.53055274",
"0.52904254",
"0.5249873",
"0.5227574",
"0.5223113"
] | 0.8068914 | 0 |
Process all the files matched with the `files_pattern` and output the results in `output` | def process_files(files_pattern, output, options=None):
if options is None:
options = {}
queue = Queue(100)
files = glob.glob(files_pattern, recursive=True)
total_count = len(files)
logging.info("starting to parse %s files", total_count)
write_results_process = Process(target=write_results, args=(queue, output, total_count))
write_results_process.start()
pool = Pool(None, process_file_init, [queue, options])
pool.map(process_file, files)
pool.close()
pool.join()
queue.put(None)
write_results_process.join()
logging.info("successfully processed %s files", queue.get()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_files(self):\n matcher = self.choose_algorithm()\n # process one file at the time for better memory management\n for i, element in enumerate(self.input):\n filepath, _ = element\n\n try:\n with open(filepath, \"r\", encoding=\"utf-8\") as readfile:\n for line in readfile:\n matcher.find_match(line, self.case_insensitive)\n\n # collect unreadeable files for error log\n except Exception:\n self.errors.append(str(filepath))\n\n # copy results and reset matcher for next file\n self.__results = matcher.results\n\n if self.counter:\n self.__results = matcher.counts\n\n matcher.reset()\n\n # output - print or json\n if self.results:\n self.output(element)\n\n # if json print progress bar\n if self.json:\n self.progress_bar(i+1, len(self.input), prefix=\"Matching:\",\n fixed_len=True, length=40)",
"def process(self, matches, tag):\n if isinstance(matches, str):\n matches = [matches]\n\n done = set()\n for match in matches:\n processed = False\n for path in self.dirs:\n for file in sorted(path.glob(match)):\n if file.name in done:\n continue\n self.__log.info('file %r matches %r' % (str(file), match))\n processed = True\n done.add(file.name)\n yield from str(file) >> tag >> self.out\n if not processed:\n raise ValueError('no matching files found for %r' % match)",
"def treat(input, output):\n files = find(input)\n acc = []\n for file in files:\n fileInfo = extract(file)\n out = makeOutputPath(output, fileInfo[\"path\"], fileInfo[\"filename\"])\n if not out == None:\n fileInfo[\"outPath\"] = out\n acc += [fileInfo]\n return acc",
"def get_files(year, month, input_pattern, out_pattern, n_domains=4):\n in_pattern = [input_pattern.format(year=year, month=str(month).zfill(2),\n domain=i)\n for i in range(1, n_domains + 1)]\n input_files = {i: sorted(glob.glob(in_pattern[i]))\n for i in range(n_domains)}\n out_pattern = out_pattern.format(year=year, month=str(month).zfill(2))\n out_files = [os.path.join(out_pattern,\n os.path.basename(input_files[0][i]).replace(\n 'custom_wrfout_d01', 'stitched_wrfout'))\n for i in range(len(input_files[0]))]\n return input_files, out_files",
"def process_files(files, options):\n\n # do a santity check on the output before doing all of that processing:\n if os.path.isdir(options.output):\n raise IsADirectoryError(errno.EISDIR, options.output)\n\n output = io.StringIO()\n actual_output = None\n\n if options.package:\n output.write(f'(cl:in-package #:{options.package})\\n\\n')\n\n try:\n for f in files:\n output.write(f\";; next section imported from file {f}\\n\\n\")\n _process_file(f, output, options)\n\n actual_output = ProcessOptions.output_file_from_option(options, 'w')\n\n output_text = output.getvalue().rstrip()\n actual_output.write(output_text)\n actual_output.write('\\n')\n finally:\n output.close()\n if actual_output and not (actual_output == sys.stderr or actual_output == sys.stdout):\n actual_output.close()",
"def process_all_files():\n src_files = get_doc_files()\n\n for src_pathname in src_files:\n if src_pathname.suffix in MARKDOWN_EXTENSIONS:\n process_file_markdown(src_pathname)\n elif src_pathname.suffix in STATIC_ASSET_EXTENSIONS:\n process_file_copytodest(src_pathname)",
"def do_files(self, args):\n file_names = self.regexprutils.get_file_names()\n print 'File names:'\n for name in file_names:\n print ' %s' % (name, )",
"def regex_filter_list(file_list, pattern, output=True):\n r = re.compile(pattern)\n matches = filter(r.match, file_list)\n\n if (len(matches) > 0 and output is True):\n #print colors.BLUE + '\\033[1m' + \"matches:\" + '\\033[0m'\n for match in matches:\n print colors.BLUE + match + colors.ENDC\n\n return matches",
"def process_files(path, patterns, callback):\n stats = dict.fromkeys(patterns, 0)\n for pattern, line_count in match_filenames(\n path, patterns, callback):\n stats[pattern] += line_count\n return stats",
"def process_files(path, patterns, callback):\r\n stats = dict.fromkeys(patterns, 0)\r\n for pattern, line_count in match_filenames(\r\n path, patterns, callback):\r\n stats[pattern] += line_count\r\n return stats",
"def match_files(patterns, files):\n\tall_files = files if isinstance(files, collections.Container) else list(files)\n\treturn_files = set()\n\tfor pattern in patterns:\n\t\tif pattern.include is not None:\n\t\t\tresult_files = pattern.match(all_files)\n\t\t\tif pattern.include:\n\t\t\t\treturn_files.update(result_files)\n\t\t\telse:\n\t\t\t\treturn_files.difference_update(result_files)\n\treturn return_files",
"def add_files(\n self,\n pattern: str,\n ) -> None:\n matches = glob.glob(pattern, recursive=True)\n for match in sorted(matches):\n self.add_file(match)",
"def extract_tars(file_pattern, path_in, path_out):\n for f in glob.glob(os.path.join(path_in, file_pattern)):\n shutil.unpack_archive(f, path_out)",
"def match_filenames(path, patterns, callback):\n for directory in walk(path):\n for pattern in patterns:\n for filename in iglob(os.path.join(directory, pattern)):\n line_count = count_lines(filename)\n callback(filename, line_count)\n yield pattern, line_count",
"def match_filenames(path, patterns, callback):\r\n for dir in walk(path):\r\n for pattern in patterns:\r\n for filename in iglob(os.path.join(dir, pattern)):\r\n line_count = count_lines(filename)\r\n callback(filename, line_count)\r\n yield pattern, line_count",
"def process(self, args):\n for benchmark_file in args.benchmark_files:\n self.process_individual_file(benchmark_file)\n self.total_files += 1",
"def cat_files(files, output):\n for file in files:\n with open(file, 'r') as fd:\n shutil.copyfileobj(fd, output)",
"def cat_files(files, output):\n for file in files:\n with open(file, 'r') as fd:\n shutil.copyfileobj(fd, output)",
"def recognize_files(list_of_filenames):\n reg_exp = define_regex()\n pattern = re.compile(reg_exp) \n matched = []\n for filename in list_of_filenames:\n match = pattern.match(filename)\n if match != None:\n matched.append(filename)\n return matched",
"def LocateFiles(pattern, root=os.curdir):\n for path, _, files in os.walk(os.path.abspath(root)):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(path, filename)",
"def results_files(self, pattern=None, regex=None):\n return self._files_in_subdir(self.results_dir, pattern, regex)",
"def compile_filename_patterns(pattern_list):\n\n pats=list(pattern_list)\n for i in range(len(pats)):\n if isinstance(pats[i],str):\n if pats[i].startswith('re:'):\n pats[i]=pats[i][3:]\n else:\n pats[i]=fnmatch.translate(pats[i])\n pats[i]=re.compile(pats[i])\n return pats",
"def get_output_files(self, action):\n assert action == \"run\", \"Unsupported action\"\n return expand(self.base_pattern_out, ext=self.extensions)",
"def open_dir(input_path, patterns):\r\n for ext in patterns:\r\n for file in Path(input_path).glob('**/*.' + ext):\r\n yield file",
"def batch(infolder, outfile): # type: (str, str) -> None\n\n if not os.path.isdir(infolder):\n return\n\n results = []\n\n for filename in os.listdir(infolder):\n print('Processing ' + filename)\n curresults = []\n if filename.endswith('.txt'):\n with open(os.path.join(infolder, filename), 'r') as curfile:\n curdata = curfile.read() + '\\n'\n curresults = processClauseText(curdata, 'text')\n elif filename.endswith('.pdf'):\n with open(os.path.join(infolder, filename), 'rb') as curfile:\n curdata = base64.b64encode(curfile.read()).decode()\n curresults = processClauseText(curdata, 'pdf')\n elif filename.endswith('.docx'):\n with open(os.path.join(infolder, filename), 'rb') as curfile:\n curdata = base64.b64encode(curfile.read()).decode()\n curresults = processClauseText(curdata, 'word')\n if len(curresults) > 0:\n for result in curresults:\n result['filename'] = filename\n results.extend(curresults)\n\n if outfile is not None:\n with open(outfile, 'w') as outfile:\n json.dump(results, outfile, indent=2)",
"def substitute(files: str, pattern: str, replacement: str):\n with fileinput.input(\n files=glob.glob(files, recursive=True), inplace=True\n ) as file:\n for line in file:\n print(re.sub(pattern, replacement, line), end='')",
"def pattern_matching(pattern_base, cc_pattern_base):\n papers = [os.path.join(target_folder, paper) for paper in os.listdir(target_folder) if \".xml\" in paper]\n \n for paper in papers:\n paper_text = open(paper[:paper.index('.')]+\".txt\", 'r').read()\n \n annotator = detect_change_events(paper, pattern_base, paper_text) \n annotator = detect_cause_correlation(paper_text, cc_pattern_base, annotator)\n \n # Write the annotations to file\n with open(paper[:paper.index('.')]+\".ann\", 'w') as annfile:\n for annotation in annotator.annotations:\n annfile.write(annotation+\"\\n\")",
"def cat(config, input):\n for file in input:\n while True:\n output = file.read()\n if not output:\n break\n m = SearchMatches(file, output, config.regex, config.color, config.underline)\n m.print_match_lines()",
"def list_and_filter(self, pattern, root_path):\n for path, dirs, files in os.walk(os.path.abspath(root_path)):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(path, filename)",
"def get_result_files(self):\n name_pattern = \"{mapper}.{ngs_library.name}\"\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"out\", name_pattern + \"{ext}\"), ext=EXT_VALUES\n )\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"log\", \"{mapper}.{ngs_library.name}.{ext}\"),\n ext=(\n \"log\",\n \"conda_info.txt\",\n \"conda_list.txt\",\n \"log.md5\",\n \"conda_info.txt.md5\",\n \"conda_list.txt.md5\",\n ),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt.md5\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html.md5\"\n )\n )\n\n for sheet in self.shortcut_sheets:\n for ngs_library in sheet.all_ngs_libraries:\n if ngs_library.name in self.ngs_library_to_kit:\n extraction_type = ngs_library.test_sample.extra_infos[\"extractionType\"]\n suffix = (\n \"_long\"\n if ngs_library.extra_infos[\"seqPlatform\"] in (\"PacBio\", \"ONP\")\n else \"\"\n )\n # Per-sample target coverage report.\n yield from expand(\n os.path.join(\n \"output\", name_pattern, \"report\", \"cov_qc\", name_pattern + \".{ext}\"\n ),\n mapper=self.config[\"tools\"][extraction_type.lower() + suffix],\n ngs_library=[ngs_library],\n ext=[\"txt\", \"txt.md5\"],\n )\n yield \"output/target_cov_report/out/target_cov_report.txt\"\n yield \"output/target_cov_report/out/target_cov_report.txt.md5\"\n if (\n self.config[\"picard_hs_metrics\"][\"path_targets_interval_list\"]\n and self.config[\"picard_hs_metrics\"][\"path_baits_interval_list\"]\n ):\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt.md5\"\n )\n )\n if self.config[\"compute_coverage_bed\"]:\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"report\", \"coverage\", name_pattern + \"{ext}\"),\n ext=(\".bed.gz\", \".bed.gz.tbi\"),\n )\n else:\n print(\n \"Genome-wide coverage BED generation disabled\", file=sys.stderr\n ) # pragma: no cover"
] | [
"0.74225605",
"0.6755515",
"0.67088324",
"0.6494828",
"0.64312804",
"0.6383366",
"0.62620753",
"0.6241482",
"0.6226911",
"0.62268376",
"0.61927044",
"0.61892426",
"0.6179227",
"0.61475813",
"0.6125794",
"0.597787",
"0.59764105",
"0.59764105",
"0.5958008",
"0.5955142",
"0.5944501",
"0.5938614",
"0.5934259",
"0.5884393",
"0.5828534",
"0.5824436",
"0.5822407",
"0.5796575",
"0.5790578",
"0.5785282"
] | 0.8119285 | 0 |
displaytime = 15000 | 30000 | 60000 | 120000| 300000 | 600000 | 1800000 displaytime default is 1800000 | def set_display_time(log_mes,displaytime = 1800000):
kill_adb_uiautomator_block_old()
if int(get_screen_off_time(log_mes)) == displaytime:
if int(displaytime) >= 60000:
log_mes.info( 'screen off time is already %s mins'%(displaytime/60000))
else:
log_mes.info('screen off time is already %s secs'%(displaytime/1000))
else:
os.system('adb shell am start -a android.settings.DISPLAY_SETTINGS')
device(text="Sleep").click()
kill_adb_uiautomator_block_old()
if int(displaytime) >= 60000:
device(text="%s minutes"%(displaytime/60000)).click()
else:
device(text="%s seconds"%(displaytime/1000)).click()
time.sleep(1)
os.system("adb shell am force-stop com.android.settings") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_on_display(self, a_display, a_last_time_display): #pylint: disable-msg=R0201 \n current_time = datetime.datetime.utcnow()\n if not a_last_time_display:\n a_display.print_screen(self.mem_db, current_time, self._sort_criteria)\n return current_time\n else:\n if current_time - a_last_time_display > datetime.timedelta(seconds=2):\n \n #clean database \n self.remove_expired_records(self.mem_db)\n \n a_display.print_screen(self.mem_db, current_time, self._sort_criteria)\n return current_time\n else:\n return a_last_time_display",
"def display_time(self, display='LEDScreen'):\r\n self.bin_time = self._update_time()\r\n wide = False # defines one or two LEDS for display\r\n if display == 'LEDScreen':\r\n if not wide:\r\n for frame_updates in range(30):\r\n for time_slot in range(len(self.bin_time)):\r\n for bit in range(len(self.bin_time[time_slot])):\r\n if self.bin_time[time_slot][bit] == 1:\r\n self.display.light_led(6 - time_slot, 6 - bit, 0.001)\r\n else:\r\n for frame_updates in range(30):\r\n for time_slot in range(3):\r\n for bit in range(6):\r\n if self.bin_time[time_slot][bit] == 1:\r\n coord = 2 * time_slot\r\n self.display.light_led(7 - coord, 5 - bit, 0.0001)\r\n self.display.light_led(7 - coord - 1, 5 - bit, 0.0001)\r\n\r\n else:\r\n for time_slot in range(3):\r\n if time_slot == 0:\r\n current_leds = self.second_leds\r\n elif time_slot == 1:\r\n current_leds = self.minute_leds\r\n else:\r\n current_leds = self.hour_leds\r\n\r\n bin_position = 0\r\n for pin in range(len(current_leds)):\r\n bin_value = self.bin_time[time_slot][bin_position]\r\n if bin_value > 0:\r\n current_leds[bin_position].on()\r\n else:\r\n current_leds[bin_position].off()\r\n bin_position += 1\r\n return",
"def ShowTime():\n ClearDisplay()\n DisplayMsg('{:^16}'.format(\"CLOCK\"), 8)\n strData = GetTime().split(\" \")\n DisplayMsg('{:^16}'.format(strData[0]), 32)\n DisplayMsg('{:^16}'.format(strData[1]), 40)\n display.show()",
"def show_time(self):\n hour = str(datetime.datetime.now().strftime(\"%H\"))\n minute = str(datetime.datetime.now().strftime(\"%M\"))\n\n hour1 = int(hour[0])\n hour2 = int(hour[1])\n minute1 = int(minute[0])\n minute2 = int(minute[1])\n\n self.light_number(self.numbers[hour1], [0, 5])\n self.light_number(self.numbers[hour2], [0, 0])\n self.light_number(self.numbers[minute1], [5, 5])\n self.light_number(self.numbers[minute2], [5, 0])",
"def change_stopwatch(timez):\r\n\r\n m = timez // 60\r\n s2 = timez % 60\r\n s1 = 0 if s2 < 10 else \"\"\r\n now = f\"{m}:{s1}{s2}\"\r\n stopwatch.configure(text=now)",
"def display_time(self, time):\n pygame.draw.rect(self.screen, self.font_fgcolor, self.time_rect)\n self.screen.blit(self.small_font.render(\"Elapsed time: %.0f s\" % time, -1, (0, 0, 0)), (5, 720))\n pygame.display.update(self.time_rect)\n return",
"def get_time():\r\n return datetime.datetime.now().strftime(\"%H\")+\":\"+datetime.datetime.now().strftime(\"%M\")+\":\"+datetime.datetime.now().strftime(\"%S\")",
"def showClock(hour: int, min: int):\n pass",
"def printTime(t):\n if t < 2 * MINUTE:\n return \"%d seconds\" % (t / SECOND)\n if t < 5 * HOUR:\n return \"%d minutes\" % (t / MINUTE)\n if t < 3 * DAY:\n return \"%d hours\" % (t / HOUR)\n if t < YEAR:\n return \"%d days\" % (t / DAY)\n if (t % YEAR) == 0:\n return \"%d years\" % (t / YEAR)\n else:\n return \"%5.1f years\" % (t / YEAR)",
"def display_form(self) -> str:\n\n hours, minutes, seconds, centi_secs = Time.centi_seconds_to_hours_minutes_seconds_millis(self.centi)\n\n display_time = \"\"\n was_non_zero_before = False\n if hours > 0:\n display_time += str(hours) + \":\"\n was_non_zero_before = True\n\n if minutes > 0:\n if was_non_zero_before:\n minutes_str = Time.ten(minutes)\n else:\n minutes_str = str(minutes)\n was_non_zero_before = True\n display_time += minutes_str + \":\"\n\n if was_non_zero_before:\n seconds_str = Time.ten(seconds)\n else:\n seconds_str = str(seconds)\n display_time += seconds_str + \".\"\n\n display_time += Time.ten(centi_secs)\n\n return display_time",
"def time_str(num):\n if num > 3600:\n return \"%0.2f hrs\" % (num / 3600)\n elif num > 60:\n return \"%0.2f mins\" % (num / 60)\n else:\n return \"%d seconds\" % num",
"def runtime_cal(start,end) :\n run_time = end - start\n mm = int(run_time/60)\n ss = round(run_time%60)\n return mm, ss",
"def print_time(t):\n print(\"Time is %.2d:%.2d:%.2d\"%(t.hour,t.minute,t.second))",
"def render_time(dt):\n return dt.strftime('%H:%M:%S')",
"def tick(self):\r\n if self.display_seconds:\r\n new_time = time.strftime('%H:%M:%S')\r\n else:\r\n new_time = time.strftime('%I:%M %p').lstrip('0')\r\n if new_time != self.time:\r\n self.time = new_time\r\n self.display_time = self.time\r\n self.config(text=self.display_time)\r\n self.after(200, self.tick)",
"def print_time(self):\n print('%.2d:%.2d:%.2d' % (self.hour, self.minute, self.second))",
"def get_time_string(time):\r\n mins = time // 60\r\n secs = time % 60\r\n time_string = ''\r\n\r\n if mins < 10:\r\n time_string += ' '\r\n elif mins < 100:\r\n time_string += ' '\r\n\r\n time_string += '%dm ' % mins\r\n\r\n if secs < 10:\r\n time_string += ' '\r\n\r\n time_string += '%ds' % secs\r\n\r\n return time_string",
"def tick(self):\n if self.display_seconds:\n new_time = time.strftime('%I:%M:%S %p')\n else:\n new_time = time.strftime('%I:%M:%S %p').lstrip('0')\n if new_time != self.time:\n self.time = new_time\n self.display_time = self.time\n self.config(text=self.display_time)\n self.after(200, self.tick)",
"def get_time(self):\n now = datetime.datetime.now()\n hour = 12 if now.hour % 12 == 0 else now.hour % 12\n meridiem = \"AM\" if now.hour < 12 else \"PM\"\n return \"%d:%02d %s\" % (hour, now.minute, meridiem)",
"def time_unit(self):\n self.skip_over_label['text'] = \"Start at (mins:secs): \"\n self.sampled_rate_label['text'] = \"Sample every (secs): \"\n self.total_frames_label['text'] = \"End at (mins:secs): \"",
"def convert_time(min, sec):\n # Updated 11/19/16 \n total_time = min*60\n total_time = total_time + sec\n \n return str(total_time)+'.0' # string because being passed to GUI",
"def format_time(self, time):\n hours = time // 3600\n time = time - hours*3600\n minutes = time // 60\n seconds = time - minutes*60\n return ('%d:%d:%d' %(hours, minutes, seconds))",
"def reportTimes(self):\n outputTime = \"Global Time\\t\"+str(self.globalTime/ 60.0)+ \\\n \"\\nMatching Time\\t\" + str(self.globalMatching/ 60.0)+ \\\n \"\\nDeletion Time\\t\" + str(self.globalDeletion/ 60.0)+ \\\n \"\\nSubsumption Time\\t\" + str(self.globalSubsumption/ 60.0)+ \\\n \"\\nSelection Time\\t\"+str(self.globalSelection/ 60.0)+ \\\n \"\\nEvaluation Time\\t\"+str(self.globalEvaluation/ 60.0) + \"\\n\"\n\n return outputTime",
"def logging_time(self, cur_res_val=0):\n self.fixed_val = self.new_val\n self.minutes_val += 1\n \n if cur_res_val:\n if self.cur_hour == 23:\n self.time_counter[str(0)] = 0\n else:\n self.time_counter[str(self.cur_hour+1)] = 0\n if cur_res_val < 30:\n self.time_counter[str(self.time_hour)] = self.minutes_val\n self.minutes_val = 0\n self.new_hour_flag = False\n elif cur_res_val >= 30:\n if self.time_hour - self.cur_hour:\n self.time_counter[str(self.cur_hour)] = self.minutes_val\n self.minutes_val = 0\n self.new_hour_flag = False\n print(self.time_counter)",
"def _get_display_duration(self):\n duration = self.duration\n return '%dmn' % duration if duration < 60 else '%dh%dmn' % (duration / 60, duration % 60)",
"def display_time():\n now = datetime.datetime.now()\n date_time = now.strftime(\"date: %b/%d/%Y, time: %I:%M %p\")\n print(date_time)\n print(\"\\n------------------------------------------------------\\n\")\n print(now.strftime(\"year: %Y\"))\n print(now.strftime(\"month: %B\"))\n print(now.strftime(\"week number of the year: %U\"))\n print(now.strftime(\"Weekday of the week: %w\"))\n print(now.strftime(\"Day of the year: %j\"))\n print(now.strftime(\"Day of the month: %d\"))\n print(now.strftime(\"Day of the week: %A\"))",
"def _get_time(self, sec, nsec):\n return sec + nsec / (10**9)",
"def print_time(start):\n if (time.time()-start)/60 > 1:\n elapsed_time = divmod(time.time()-start, 60)\n if elapsed_time[0] > 1:\n print(\"\\nElapsed time: %.0f minutes, %.2f seconds\" % (elapsed_time[0], elapsed_time[1]))\n else:\n print(\"\\nElapsed time: %.0f minute, %.2f seconds\" % (elapsed_time[0], elapsed_time[1]))\n else:\n print(\"\\nElapsed time: %.2f seconds\" % (time.time()-start))\n print(\"#\"*19 + \"\\n\" + time.strftime(\"%d/%m/%Y %H:%M:%S\") + \"\\n\" + \"#\"*19 + \"\\n\")",
"def HMStime(s):\n if s < 60.:\n return '%.2f s' % s\n elif s < 3600.:\n return '%d:%.2f' % (int(s / 60 % 60), s % 60)\n else:\n return '%d:%d:%.2f' % (int(s / 3600), int(s / 60 % 60), s % 60)",
"def nowStr(time=None):\n if time is None:\n time = datetime.now().time()\n if time.minute < 10:\n return time.strftime(\"%H ноль %m\")\n else:\n return time.strftime(\"%H %M\")"
] | [
"0.68090326",
"0.66291904",
"0.6462265",
"0.63614434",
"0.6307813",
"0.6285851",
"0.62751526",
"0.6242384",
"0.62390834",
"0.6192343",
"0.6117744",
"0.6111203",
"0.6092385",
"0.60823214",
"0.60819495",
"0.608081",
"0.6054099",
"0.6031724",
"0.60215324",
"0.5962884",
"0.5936603",
"0.5933509",
"0.59334236",
"0.5932619",
"0.59306145",
"0.59018284",
"0.59007025",
"0.58979064",
"0.58870035",
"0.58802915"
] | 0.7196308 | 0 |
Creates a new Participant model, filling in some default constructor args. This is intended especially for updates, where more fields are required than for inserts. | def _participant_with_defaults(self, **kwargs):
defaults = {
'hpoId': UNSET_HPO_ID,
'withdrawalStatus': WithdrawalStatus.NOT_WITHDRAWN,
'suspensionStatus': SuspensionStatus.NOT_SUSPENDED,
'participantOrigin': 'example',
'version': 1,
'lastModified': datetime.now(),
'signUpTime': datetime.now(),
'isTestParticipant': False
}
defaults.update(kwargs)
if 'biobankId' not in defaults:
defaults['biobankId'] = self.unique_participant_biobank_id()
if 'participantId' not in defaults:
defaults['participantId'] = self.unique_participant_id()
if 'researchId' not in defaults:
defaults['researchId'] = self.unique_research_id()
return Participant(**defaults) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_participant(name='Not Brian', email='[email protected]') ->\\\n Participant:\n participant = Participant(name=name, email=email)\n return participant",
"def __init__(self,\r\n username=None,\r\n first_name=None,\r\n last_name=None,\r\n application_id=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.username = username\r\n self.first_name = first_name\r\n self.last_name = last_name\r\n self.application_id = application_id\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties",
"def create(self,**extra_fields):\r\n print(extra_fields)\r\n data = self.model(**extra_fields)\r\n data.save(using=self._db)",
"def __init__(self,\r\n name=None,\r\n given_name=None,\r\n middle_name=None,\r\n family_name=None,\r\n address=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.name = name\r\n self.given_name = given_name\r\n self.middle_name = middle_name\r\n self.family_name = family_name\r\n self.address = address\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties",
"def __init__(self, conversation_participant_arn=None, conversation_participant_name=None, conversation_participant_uuid=None, email=None, joined_dts=None, language_iso_code=None, last_message_dts=None, left_dts=None, profile_image_url=None, sms_phone_number=None, status=None, timezone=None, unread_messages=None): # noqa: E501 # noqa: E501\n\n self._conversation_participant_arn = None\n self._conversation_participant_name = None\n self._conversation_participant_uuid = None\n self._email = None\n self._joined_dts = None\n self._language_iso_code = None\n self._last_message_dts = None\n self._left_dts = None\n self._profile_image_url = None\n self._sms_phone_number = None\n self._status = None\n self._timezone = None\n self._unread_messages = None\n self.discriminator = None\n\n if conversation_participant_arn is not None:\n self.conversation_participant_arn = conversation_participant_arn\n if conversation_participant_name is not None:\n self.conversation_participant_name = conversation_participant_name\n if conversation_participant_uuid is not None:\n self.conversation_participant_uuid = conversation_participant_uuid\n if email is not None:\n self.email = email\n if joined_dts is not None:\n self.joined_dts = joined_dts\n if language_iso_code is not None:\n self.language_iso_code = language_iso_code\n if last_message_dts is not None:\n self.last_message_dts = last_message_dts\n if left_dts is not None:\n self.left_dts = left_dts\n if profile_image_url is not None:\n self.profile_image_url = profile_image_url\n if sms_phone_number is not None:\n self.sms_phone_number = sms_phone_number\n if status is not None:\n self.status = status\n if timezone is not None:\n self.timezone = timezone\n if unread_messages is not None:\n self.unread_messages = unread_messages",
"def __init__(__self__, *,\n email: Optional[pulumi.Input[str]] = None,\n first_name: Optional[pulumi.Input[str]] = None,\n last_name: Optional[pulumi.Input[str]] = None,\n phone: Optional[pulumi.Input[str]] = None,\n title: Optional[pulumi.Input[str]] = None):\n if email is not None:\n pulumi.set(__self__, \"email\", email)\n if first_name is not None:\n pulumi.set(__self__, \"first_name\", first_name)\n if last_name is not None:\n pulumi.set(__self__, \"last_name\", last_name)\n if phone is not None:\n pulumi.set(__self__, \"phone\", phone)\n if title is not None:\n pulumi.set(__self__, \"title\", title)",
"def create(cls, *args, **kwargs):\r\n return cls(*args, **kwargs).save()",
"def __init__(self,\r\n id=None,\r\n name=None,\r\n last_edited=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.id = id\r\n self.name = name\r\n self.last_edited = APIHelper.RFC3339DateTime(last_edited) if last_edited else None\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties",
"def create(cls, **kwargs):\r\n return cls().fill(**kwargs).save()",
"def __init__(self, id, first_name, last_name, player_name, age, type):\n AbstractEsportsPlayer._validate_input_integer('ID', id)\n self._id = id\n\n AbstractEsportsPlayer._validate_input_string('First Name', first_name)\n self._first_name = first_name\n\n AbstractEsportsPlayer._validate_input_string('Last Name', last_name)\n self._last_name = last_name\n\n AbstractEsportsPlayer._validate_input_string('Player Name', player_name)\n self._player_name = player_name\n\n AbstractEsportsPlayer._validate_input_integer('Age', age)\n self._age = age\n\n AbstractEsportsPlayer._validate_input_string('Type', type)\n self._type = type",
"def __init__(self, uuid=None, email=None, first_name=None, last_name=None, phone=None, address=None, cusno=None, subs=None, consent=None, legal=None, pending_address_changes=None, past_temporary_addresses=None, has_completed_registration=None): # noqa: E501 # noqa: E501\n\n self._uuid = None\n self._email = None\n self._first_name = None\n self._last_name = None\n self._phone = None\n self._address = None\n self._cusno = None\n self._subs = None\n self._consent = None\n self._legal = None\n self._pending_address_changes = None\n self._past_temporary_addresses = None\n self._has_completed_registration = None\n self.discriminator = None\n\n self.uuid = uuid\n self.email = email\n if first_name is not None:\n self.first_name = first_name\n if last_name is not None:\n self.last_name = last_name\n if phone is not None:\n self.phone = phone\n if address is not None:\n self.address = address\n self.cusno = cusno\n self.subs = subs\n self.consent = consent\n self.legal = legal\n if pending_address_changes is not None:\n self.pending_address_changes = pending_address_changes\n self.past_temporary_addresses = past_temporary_addresses\n self.has_completed_registration = has_completed_registration",
"def abstract_create(self, model, params):\n # we check that the given fields exist\n self.check_fields_existence(model, params.keys())\n\n # then we create the record after preparing params\n return self.env[model].sudo().create(self._prepare_params(params))",
"def create(cls, fields=None, **fields_kwargs):\n # NOTE -- you cannot use hydrate/populate here because populate alters modified fields\n instance = cls(fields, **fields_kwargs)\n instance.save()\n return instance",
"def __init__(self, message_id, event_type, generated, traits, raw):\n Model.__init__(self, message_id=message_id, event_type=event_type,\n generated=generated, traits=traits, raw=raw)",
"def create(cls, **kwargs):\n instance = cls(**kwargs)\n instance.save()\n return instance",
"def create(self, **kwargs):\n reverse_one_to_one_fields = frozenset(kwargs).intersection(\n self.model._meta._reverse_one_to_one_field_names\n )\n if reverse_one_to_one_fields:\n raise ValueError(\n \"The following fields do not exist in this model: %s\"\n % \", \".join(reverse_one_to_one_fields)\n )\n\n obj = self.model(**kwargs)\n self._for_write = True\n obj.save(force_insert=True, using=self.db)\n return obj",
"def __init__(self,\r\n id=None,\r\n consumer_id=None,\r\n consumer_ssn=None,\r\n requester_name=None,\r\n request_id=None,\r\n constraints=None,\r\n mtype=None,\r\n status=None,\r\n created_date=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.id = id\r\n self.consumer_id = consumer_id\r\n self.consumer_ssn = consumer_ssn\r\n self.requester_name = requester_name\r\n self.request_id = request_id\r\n self.constraints = constraints\r\n self.mtype = mtype\r\n self.status = status\r\n self.created_date = created_date\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties",
"def __init__(self, username=None, realname=None, phone=None, email=None, room=None, position=None, department_id=None, building_id=None, id=None, version_lock=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._username = None\n self._realname = None\n self._phone = None\n self._email = None\n self._room = None\n self._position = None\n self._department_id = None\n self._building_id = None\n self._id = None\n self._version_lock = None\n self.discriminator = None\n\n self.username = username\n self.realname = realname\n self.phone = phone\n self.email = email\n self.room = room\n self.position = position\n self.department_id = department_id\n self.building_id = building_id\n self.id = id\n self.version_lock = version_lock",
"def __init__(self,\r\n setup=None,\r\n merge_fields=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.setup = setup\r\n self.merge_fields = merge_fields\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties",
"def __init__(self,beneficiary:Participant,amount:float,end_to_end_id:str,tx_id:str,acceptance_datetime:datetime,reference:str,remittance_information:str): \n self.beneficiary = beneficiary\n self.amount = amount\n self.tx_id = tx_id\n self.end_to_end_id = end_to_end_id\n self.acceptance_datetime = acceptance_datetime\n self.reference = reference\n self.remittance_information = remittance_information,",
"def __init__(self, id=None, name=None, created=None, creator=None):\n\n self._id = None\n self._name = None\n self._created = None\n self._creator = None\n\n self.id = id\n self.name = name\n self.created = created\n self.creator = creator",
"def __init__(self, *args, **kwargs):\n if kwargs:\n for key, value in kwargs.items():\n if key != \"__class__\":\n if key == \"created_at\":\n self.created_at = datetime.strptime(\n value, \"%Y-%m-%dT%H:%M:%S.%f\")\n elif key == \"updated_at\":\n self.updated_at = datetime.strptime(\n value, \"%Y-%m-%dT%H:%M:%S.%f\")\n elif key == \"id\":\n self.id = value\n else:\n setattr(self, key, value)\n else:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()",
"def __init__(self, *args, **kwargs):\n if (args and type(args) is dict):\n BaseModel.__init__(self, args[0])\n else:\n BaseModel.__init__(self)",
"def __init__(self, *args, **kwds):\n if args or kwds:\n super(DetectedPerson, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.detection_id is None:\n self.detection_id = 0\n if self.confidence is None:\n self.confidence = 0.\n if self.pose is None:\n self.pose = geometry_msgs.msg.PoseWithCovariance()\n if self.height is None:\n self.height = 0.\n if self.bbox_x is None:\n self.bbox_x = 0.\n if self.bbox_y is None:\n self.bbox_y = 0.\n if self.bbox_w is None:\n self.bbox_w = 0.\n if self.bbox_h is None:\n self.bbox_h = 0.\n if self.modality is None:\n self.modality = ''\n if self.embed_vector is None:\n self.embed_vector = []\n else:\n self.detection_id = 0\n self.confidence = 0.\n self.pose = geometry_msgs.msg.PoseWithCovariance()\n self.height = 0.\n self.bbox_x = 0.\n self.bbox_y = 0.\n self.bbox_w = 0.\n self.bbox_h = 0.\n self.modality = ''\n self.embed_vector = []",
"def __init__(self,\n id=None,\n created_at=None,\n sender_id=None,\n sender_screen_name=None,\n recipient_id=None,\n recipient_screen_name=None,\n text=None):\n self.id = id\n self.created_at = created_at\n self.sender_id = sender_id\n self.sender_screen_name = sender_screen_name\n self.recipient_id = recipient_id\n self.recipient_screen_name = recipient_screen_name\n self.text = text",
"def __init__(self, *args, **kwargs):\n for key, value in kwargs.items():\n if key == \"created_at\":\n value = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%S.%f\")\n setattr(self, key, value)\n\n if key == \"updated_at\":\n value = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%S.%f\")\n setattr(self, key, value)\n\n if key == \"__class__\":\n continue\n else:\n setattr(self, key, value)\n\n if len(kwargs) == 0:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n storage.new(self)",
"def __init__(self, \n # - Arguments from Person\n given_name=None, initials=None, family_name=None, \n email_address=None,\n # - Other staff-specific arguments\n department=None\n ):\n # - We can use super() to call the parent class' __init__ \n # because there's only one parent class...\n super().__init__(\n given_name, initials, family_name, email_address\n )\n # - But we ALSO need to initialize properties that are \n # members of THIS class\n self.department = department",
"def __init__(self, name=None, phone=None, country=None, fax=None, email=None, address=None, description=None):\n \n \n\n self._name = None\n self._phone = None\n self._country = None\n self._fax = None\n self._email = None\n self._address = None\n self._description = None\n self.discriminator = None\n\n if name is not None:\n self.name = name\n if phone is not None:\n self.phone = phone\n if country is not None:\n self.country = country\n if fax is not None:\n self.fax = fax\n if email is not None:\n self.email = email\n if address is not None:\n self.address = address\n if description is not None:\n self.description = description",
"def create(self, **kwargs):\n return self.save(self.new(**kwargs))",
"def __init__(self, \n student_id=0,\n # - Arguments from Person\n given_name=None, initials=None, family_name=None, \n email_address=None,\n # - Other student-specific arguments\n major=None, minor=None\n ):\n # - We can use super() to call the parent class' __init__ \n # because there's only one parent class...\n super().__init__(\n given_name, initials, family_name, email_address\n )\n # - But we ALSO need to initialize properties that are \n # members of THIS class\n self.student_id = student_id\n self.major = major\n self.minor = minor"
] | [
"0.6663132",
"0.6514317",
"0.6252609",
"0.6068203",
"0.60313624",
"0.59170717",
"0.58870864",
"0.5882688",
"0.5867263",
"0.5828657",
"0.5822187",
"0.5800505",
"0.5759603",
"0.5748322",
"0.57338744",
"0.5728453",
"0.5710991",
"0.5684834",
"0.5683016",
"0.5629623",
"0.56254387",
"0.5615373",
"0.56034875",
"0.5596496",
"0.55756897",
"0.556544",
"0.5562944",
"0.5558345",
"0.55571026",
"0.5556127"
] | 0.6780463 | 0 |
Dict-like resource getter | def __getitem__(self, name):
return self.__resources[name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resource_map(self):",
"def get_resource_data(self, **kw):\n data = dict(\n url=self['url'],\n dist=self['name'])\n data.update(kw)\n return data",
"def data(self):\n return { # TODO Actually query for this shit\n \"foo\": self.__name__,\n \"url\": f\"{self.request.resource_url(self)}\",\n }",
"def __init__(self, path=None, resource=\"names\",\n language=config[\"default_language\"],\n lowercasing=config[\"lowercasing\"],\n corpus=config[\"corpus\"], freq=False, wordlist=None):\n\n super(ResourceDict, self).__init__()\n if len(language) > 2:\n language = lookup_language_by_code(language, reverse=True)\n #: the language of the resource\n self.language = language\n\n if isinstance(path, str):\n self.path=path\n\n if not path:\n\n if resource in [\"names\", \"numbers\", \"associations\", \"gdeps\"]:\n resource_type = \"language_resources\"\n subfolder = self.language\n elif resource in [\"freqdict\", \"vocabulary\", \"cooccurrence\"]:\n resource_type = \"corpus_resources\"\n subfolder = corpus\n\n path_to_dict = os.path.join(config[\"path_to_resources\"],\n resource_type, subfolder,\n config[resource_type][subfolder][\n resource])\n\n #: the path from which the resource is loaded\n self.path = path_to_dict\n\n try:\n if resource not in [\"cooccurrence\", \"gdeps\"]:\n data = load_resource(self.path, format=\"infer\",\n lowercasing=lowercasing, silent=True)\n else:\n\n if freq:\n data = load_resource(self.path, format=\"json_freqdict\",\n lowercasing=lowercasing, silent=True)\n\n else:\n data = load_resource(self.path, format=\"infer\",\n lowercasing=lowercasing, silent=True,\n wordlist=wordlist)\n\n # else:\n # data = load_resource(self.path, format=\"json\",\n # lowercasing=lowercasing, silent=True)\n self.data = data\n except FileNotFoundError:\n print(\"No resource was found, please check the file path \"\n \"\"+self.path)",
"def getResource(self):\n pass;",
"def __getattr__(self, attr):\n actual_resource = getattr(self.swagger_client, attr)\n if attr in [\"Authorization\", \"Effects\", \"Identify\", \"Info\",\n \"PanelLayout\", \"State\"]:\n return WrappedResource(actual_resource, attr)\n else:\n return actual_resource",
"def get(self, resource, default=0):\n return getattr(self._resources, resource, default)",
"def build_resource(self, *args, **kwargs):\r\n r = {}\r\n for current_resource in self.resources:\r\n item = self._get_resource(\r\n repo=self.current_repo, owner=self.owner, \r\n resource=current_resource, **kwargs\r\n )\r\n if not item: continue\r\n r[current_resource] = item\r\n\r\n return r",
"def _get_resource(self, label: str, source: dict, resource_type: str):\r\n try:\r\n return source[label]\r\n except KeyError:\r\n raise ValueError(\"Cannot find {0} with label '{1}'.\\nExisting {0} labels: {2}\".format(\r\n resource_type, label, list(source.keys())))",
"def get(self, obj):",
"def on_get_resource(self, req, resp, **params):\n instance = self.get_object(**params)\n resp.json(**instance.as_resource)",
"def get_resource_information():\n\n\n # the resources we are allowed to use is easy. We just copy this...\n resource_limit_dict = _resources_allowed_dict.copy()\n\n \n # from the other dict, we only take the resource information. (this omits\n # locks and timing information that isn't needed)\n\n # first, let's do the easy thing, the quantity resources. These are just \n # floats\n resource_use_dict = {}\n for resourcename in resource_constants.quantity_resources:\n resource_use_dict[resourcename] = _resources_consumed_dict[resourcename]\n\n # for the fungible resources (files opened, etc,), we only need a count...\n for resourcename in resource_constants.fungible_item_resources:\n resource_use_dict[resourcename] = len(_resources_consumed_dict[resourcename])\n\n # for the individual item resources (ports, etc,), we copy the set...\n for resourcename in resource_constants.individual_item_resources:\n resource_use_dict[resourcename] = _resources_consumed_dict[resourcename].copy()\n\n # and that's it!\n return (resource_limit_dict, resource_use_dict)",
"def get_resource_data(self, resource):\n url = self.api_url + resource\n return self.get_url_data(url)",
"def get(owner, resource):\n resource = logic.resource.find(owner, resource)\n return jsonify(resource)",
"def get(self, name):",
"def load_resource(self, resource_cls, data):\n rdata = {}\n wlinks = getattr(resource_cls, '_writeable_link_fields', {})\n rlinks = getattr(resource_cls, '_readonly_link_fields', {})\n link_names = set(['id'] + list(wlinks.keys()) + list(rlinks.keys()))\n for key, value in data.items():\n if key in link_names:\n if isinstance(value, list):\n raw_ids = value\n unlist = False\n else:\n raw_ids = [value]\n unlist = True\n ids = []\n for i in raw_ids:\n if i is None:\n ids.append(None)\n else:\n ids.append(str(i))\n if unlist:\n rdata[key] = ids[0]\n else:\n rdata[key] = ids\n else:\n rdata[key] = value\n return resource_cls(**rdata)",
"def _resource_fields(chromo):\n return {\n 'name': chromo['resource_name'],\n 'description': chromo['title'],\n 'url_type': u'datastore',\n }",
"def resources(self):",
"async def get_resource(self, resource_id: int) -> dict:\n resource = await self.request.get(\n join_path(self._base_path, str(resource_id))\n )\n self._sanitize_resource(self._get_to_actual_data(resource))\n return resource",
"def get(self, key):",
"def get(self, key):",
"def ResourcePath(self, name):\n pass",
"def __getattr__(self, key):\n if key in ('keys', 'values', 'items'):\n return getattr(self.data, key)\n return super().__getattr__(key)",
"def get_resource_data(resource_type):\n resources = resource_type.objects.all()\n data_dict = {}\n for resource in resources:\n data_dict[get_object_id(resource)] = expand_resource(resource)\n\n return data_dict",
"def getResource(self):\n return self.__resource;",
"def __getattr__(self, key):\n if key == 'name':\n return self._name\n elif key == 'raw':\n return self._raw\n elif key == 'eng':\n return self._eng\n elif key == 'children':\n return self._children\n elif key == 'raw_int':\n return self.get_raw_int()\n elif key == 'desc':\n return STIX_IDB.get_parameter_description(self._name)\n elif key == 'param':\n return self.as_tuple()\n else:\n raise KeyError(key)",
"def paths(domain, resource):\n ret = {}\n path = '/{0}'.format(resource.get('url', domain))\n path = re.sub(r'<(?:[^>]+:)?([^>]+)>', '{\\\\1}', path)\n pathtype = 'resource'\n ret[path] = methods(domain, resource, pathtype)\n\n primary = identifier(resource)\n path = '{0}/{1}'.format(path, pathparam(primary['name']))\n pathtype = 'item'\n ret[path] = methods(domain, resource, pathtype)\n\n alt = resource.get('additional_lookup', None)\n if alt is not None:\n path = '/{0}/{1}'.format(domain, pathparam(alt['field']))\n pathtype = 'additional_lookup'\n ret[path] = methods(domain, resource, pathtype, alt['field'])\n return ret",
"def get():",
"def get():",
"def lookup(self):\r\n return resources.Lookup(self)"
] | [
"0.66327727",
"0.6343687",
"0.6287191",
"0.6218388",
"0.61918527",
"0.6092798",
"0.60210764",
"0.60139334",
"0.59463495",
"0.5932285",
"0.59251744",
"0.59157777",
"0.58940756",
"0.58883554",
"0.58654296",
"0.5848385",
"0.5831318",
"0.58073753",
"0.5795992",
"0.5769386",
"0.5769386",
"0.5740434",
"0.5720768",
"0.5702958",
"0.569482",
"0.5690995",
"0.5679895",
"0.5678753",
"0.5678753",
"0.56743896"
] | 0.6858929 | 0 |
Resolve a filename relative to this directory | def resolve(self, name):
# pylint: disable = E1101
root = _os.path.normpath('/')
resolved = _os.path.splitdrive(_os.path.normpath(
_os.path.join(root, unicode(name).encode('utf-8'))
))[1]
while resolved.startswith(root):
resolved = resolved[1:]
resolved = _os.path.normpath(
_os.path.join(self._base, resolved)
).decode('utf-8')
return FileResource(self, name, resolved.encode(self._encoding)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resolve(fname):\n if os.path.dirname(__file__):\n return os.path.dirname(__file__) + \"/../common/\" + fname\n else:\n return \"/common/\" + fname",
"def rel_filename(filename, relative_to=None):\n if relative_to is None:\n relative_to = os.getcwd()\n if not relative_to.endswith(os.path.sep):\n relative_to += os.path.sep\n filename = os.path.normpath(os.path.abspath(filename))\n if filename.startswith(relative_to):\n return filename[len(relative_to):]\n else:\n return filename",
"def resolve(name):\n arg = Path(name)\n return str(arg.resolve())",
"def find_file(file_name):\n if (pathlib.Path(file_name).resolve()):\n file_name = str(file_name)\n logging.info(f' found {file_name}.')\n return file_name\n else:\n logging.error(f' no file {file_name} found for processing.')\n sys.exit()",
"def _resolve_relative_path(filepath: str):\n if not filepath:\n return None\n\n inf_path = os.path.join(os.path.dirname(__file__), filepath)\n\n return inf_path",
"def _GetRelPath(self, filename):\r\n absname = os.path.join(self.repo_dir, filename)\r\n return os.path.relpath(absname)",
"def resolved(path: Union[str, Path]) -> str:\n return os.path.basename(os.path.abspath(path))",
"def resolve(self, path=''):\n path = self._sep.join([self.root] + path.split('/'))\n return realpath(path)",
"def _resolve_file_path(download_dir, file_name):\n # separate file path to basename and extension\n basename = _get_default_file_path(download_dir, file_name)\n ext = ''\n while True:\n basename, new_ext = os.path.splitext(basename)\n if not new_ext:\n break\n ext = new_ext + ext\n\n # get existing files with same name\n # NOTICE: glob does not support regex, we expect \"*\" is number\n sibling_files = glob.glob('{}.*{}'.format(basename, ext))\n\n def number_from_path(path):\n \"\"\"\n return number from file path\n ex) /target/file.1.txt -> 1\n ex) /target/file.2.tar.gz -> 2\n \"\"\"\n try:\n num = int(path[len(basename) + 1:-len(ext)])\n except ValueError:\n return 0\n return num\n\n # find max number\n if not sibling_files:\n assign_number = 1\n else:\n max_numbered_path = max(sibling_files, key=number_from_path)\n max_number = number_from_path(max_numbered_path)\n assign_number = max_number + 1\n\n return ''.join([basename, '.', str(assign_number), ext])",
"def relpath(filename):\n return os.path.join(os.path.dirname(__file__), filename)",
"def normalize_path(working_dir, filename):\n\n if not os.path.isabs(filename):\n filename = os.path.join(working_dir, filename)\n\n return filename",
"def relpath(filename):\n\n return os.path.join(os.path.dirname(__file__), filename)",
"def canonical_filename(filename):\n import os, os.path\n\n path = from_posix(filename)\n while True:\n path = os.path.abspath(path)\n try:\n p = os.path.dirname(path)\n # os.readlink doesn't exist in windows python2.7\n try:\n deref_path = os.readlink(path)\n except AttributeError: # pragma: no cover\n return path\n path = os.path.join(p, deref_path)\n except OSError:\n return path",
"def resolvePath_(cls, path):\r\n try:\r\n fsref, isFolder, wasAliased = FSResolveAliasFile(os.path.realpath(path), 1)\r\n return os.path.abspath(fsref.as_pathname().decode(u\"utf-8\"))\r\n except MacOS.Error as e:\r\n return None",
"def get_abs_filename(filename):\n absolute_current_dir = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(absolute_current_dir, filename)",
"def Filename(fname):\n if fname.startswith('##/'):\n if chroot_path:\n fname = os.path.join(chroot_path, fname[3:])\n else:\n return None\n\n # Search for a pathname that exists, and return it if found\n if fname and not os.path.exists(fname):\n for path in search_paths:\n pathname = os.path.join(path, os.path.basename(fname))\n if os.path.exists(pathname):\n return pathname\n\n # If not found, just return the standard, unchanged path\n return fname",
"def resolved(rpath):\r\n return realpath(abspath(rpath))",
"def _resolve_file_or_none(context_dir, conf, conf_file, has_args=False):\n if not conf:\n return None\n base1 = os.path.expanduser(context_dir)\n base2 = os.path.expanduser(conf)\n path = os.path.join(base1, base2)\n path = os.path.abspath(path) # This resolves \"/../\"\n if not os.path.exists(path):\n raise Exception(\"File does not exist: '%s'. This was \"\n \"referenced in the file '%s'.\" % (path, conf_file))\n return path",
"def normalizeFilename(filename):\n return os.path.abspath(os.path.expanduser(filename))",
"def _GetRelPath(self, filename):\n assert filename.startswith(self.subdir), (filename, self.subdir)\n return filename[len(self.subdir):].lstrip(r\"\\/\")",
"def resolve(self, filespec):\n \n parts = re.split(r\"[\\\\/]\", filespec)\n # try to substitute the first part as if it is a handle\n parts[0] = self.fhdict.get(parts[0].lower(), (parts[0],))[0]\n return os.path.sep.join(parts)",
"def get_filename(filepath):\n return filepath.replace(\"{}\\\\\".format(RES_DIR), \"\")",
"def auto_file(filename, where='.') -> str:\n\n if os.path.isabs(filename):\n return filename\n\n prob = os.path.join(where, filename)\n if os.path.exists(prob) and os.path.isfile(prob):\n return prob\n\n files = list(glob.iglob(os.path.join(where, '**', filename), recursive=True))\n if len(files) == 0:\n raise FileNotFoundError('Given file could not be found with recursive search:' + filename)\n\n if len(files) > 1:\n raise FileNotFoundError('More than one file matches given filename. Please specify it explicitly' + filename)\n\n return files[0]",
"def rel_resolve(path):\n if os.path.isabs(path):\n return os.path.abspath(path)\n else:\n return os.path.join(SCRIPTDIR, path)",
"def lookupmodule(self, filename):\n if os.path.isabs(filename) and os.path.exists(filename):\n return filename\n f = os.path.join(sys.path[0], filename)\n if os.path.exists(f) and self.canonic(f) == self.mainpyfile:\n return f\n root, ext = os.path.splitext(filename)\n if ext == '':\n filename = filename + '.py'\n if os.path.isabs(filename):\n return filename\n for dirname in sys.path:\n while os.path.islink(dirname):\n dirname = os.readlink(dirname)\n fullname = os.path.join(dirname, filename)\n if os.path.exists(fullname):\n return fullname\n return None",
"def full_path(filename):\n\timport os.path\n\tfolder = os.path.dirname(os.path.realpath(__file__))\n\treturn os.path.join(folder, filename)",
"def resolve_name(name):\n if type(name) is not str:\n raise TypeError('Expected type str for name but got ' + type(name))\n\n if not path.exists(name):\n return name\n\n (file, ext) = path.splitext(name)\n n = 1\n tmp_name = file + '.' + str(n) + ext\n while path.exists(tmp_name):\n n += 1\n tmp_name = file + '.' + str(n) + ext\n\n return tmp_name",
"def _lookupFileName (self,\n enclosingDirectoryName : String,\n originalFileName : String) -> String:\n\n Logging.trace(\">>: directory = %r, file = %r\",\n enclosingDirectoryName, originalFileName)\n\n cls = self.__class__\n result = None\n separator = OperatingSystem.pathSeparator\n simpleFileName = OperatingSystem.basename(originalFileName)\n searchPathList = list(cls._searchPathList)\n searchPathList.append(enclosingDirectoryName)\n\n for directoryName in searchPathList:\n fileName = iif(directoryName == \".\", originalFileName,\n directoryName + separator + simpleFileName)\n isFound = OperatingSystem.hasFile(fileName)\n Logging.trace(\"--: %r -> found = %r\", fileName, isFound)\n\n if isFound:\n result = fileName\n break\n\n Logging.trace(\"<<: %r\", result)\n return result",
"def _abs_path(fn):\n return os.path.join(os.path.dirname(__file__), fn)",
"def abs_path(file_name: str, directory: str) -> str:\r\n return os.path.join(directory, file_name)"
] | [
"0.7362332",
"0.6747728",
"0.6739235",
"0.65846235",
"0.656219",
"0.65612847",
"0.65566796",
"0.6551496",
"0.65442425",
"0.65062994",
"0.65059465",
"0.6505097",
"0.6504365",
"0.6454143",
"0.6390446",
"0.6387397",
"0.6368316",
"0.632986",
"0.631312",
"0.6308752",
"0.62896514",
"0.6282436",
"0.62712085",
"0.626751",
"0.62293625",
"0.6223882",
"0.62203157",
"0.62131935",
"0.62064826",
"0.61681145"
] | 0.72245663 | 1 |
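The resolve method in the row above re-roots the supplied name before joining it to the base directory, which is what keeps the result from escaping that directory. A minimal, self-contained sketch of the same idea, with illustrative POSIX paths that are not taken from the dataset:

```python
# Sketch only: normalise a user-supplied name against an artificial root, strip the drive
# and any leading root separators, then join it under the base directory so the result
# cannot climb out of it. Encoding handling from the original is omitted.
import os

def resolve_under_base(base, name):
    root = os.path.normpath("/")
    resolved = os.path.splitdrive(os.path.normpath(os.path.join(root, name)))[1]
    while resolved.startswith(root):
        resolved = resolved[len(root):]
    return os.path.normpath(os.path.join(base, resolved))

print(resolve_under_base("/srv/data", "../../etc/passwd"))  # -> /srv/data/etc/passwd
```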
returns a list of functions, with their arguments, that should fail when the trustline network/trustline_0_1 is frozen; the functions are expected to be called from accounts[0] | def frozen_functions_and_args(accounts):
return [
["transfer", (1, 2, [accounts[0], accounts[1]], b"")],
["transferReceiverPays", (1, 2, [accounts[0], accounts[1]], b"")],
["transferFrom", (1, 2, [accounts[0], accounts[1]], b"")],
["updateTrustline", (accounts[1], 101, 101, 101, 101, True)],
["updateCreditlimits", (accounts[1], 101, 101)],
["closeTrustline", [accounts[1]]],
[
"closeTrustlineByTriangularTransfer",
(accounts[1], 100, [accounts[1], accounts[2]]),
],
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def condition_for_function(f, abi, all_not_in_ABI):\n\n\tcondition = []\n\tfor n in f.entry_points:\n\t\t[category, num] = api.get_category_for_name( n )\n\t\tif category not in abi:\n\t\t\tcondition.append( 'defined(need_%s)' % (gl_XML.real_category_name( category )) )\n\t\telif all_not_in_ABI:\n\t\t\treturn []\n\n\treturn condition",
"def test_mediate_transfer_fails_if_intermediate_trustline_frozen(\n currency_network_contract_with_frozen_trustline, accounts\n):\n network = currency_network_contract_with_frozen_trustline\n\n path = [accounts[4], accounts[0], accounts[1], accounts[2]]\n\n with pytest.raises(eth_tester.exceptions.TransactionFailed):\n network.functions.transfer(10, 10, path, b\"\").transact({\"from\": accounts[4]})",
"def _check_integrity(self):\n for f in self.list_func:\n if(not(isinstance(f, (pFunc_collec, pFunc_base, pFunc_fromcallable)))):\n raise ValueError('type %s while expecting pFunc_base or collection'\n ' ' % (str(type(f))))\n f._check_integrity()",
"def validate_chain():",
"def credibility_unsafe(*mass_functions):\n #Get the supports of each mass function:\n supports = []\n for mass_function in mass_functions:\n supports.append(mass_function.support_unsafe(*[x for x in mass_functions if x != mass_function]))\n \n #Compute the credibility of each mass function:\n cred = []\n supportSum = sum(supports)\n for i in range(len(mass_functions)):\n cred.append(round(supports[i]/supportSum, 6))\n return cred",
"def test_all_functions_auth_failure(self):\r\n \r\n auth = {'username':'tester', 'api_key':'api_key'}\r\n\r\n # Indicate no user record was found with the provided auth info.\r\n interface.get_user_with_api_key = mock_raises_DoesNotExistError\r\n \r\n try:\r\n proxy.renew_resources(auth, [])\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n \r\n try:\r\n proxy.acquire_resources(auth, {})\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n \r\n try:\r\n proxy.acquire_specific_vessels(auth, [])\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n \r\n try:\r\n proxy.release_resources(auth, [])\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n\r\n try:\r\n proxy.get_resource_info(auth)\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n\r\n try:\r\n proxy.get_account_info(auth)\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n\r\n try:\r\n proxy.get_public_key(auth)\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")",
"def check_mass_functions_compatibility(function):\n @functools.wraps(function)\n def wrapped_function(*args):\n for i in range(len(args)):\n for j in range(len(args)):\n if i != j and not args[i].is_compatible(args[j]):\n raise IncompatibleMassFunctionsError(args[i], args[j])\n return function(*args)\n return wrapped_function",
"def fail_on_zero(func_name, result, func, args):\n if not result:\n raise WinproxyError(func_name)\n return args",
"def test_func_raises_type_error(self):\n funcs = [\n CityHash32,\n CityHash64,\n CityHash128,\n CityHash64WithSeed,\n CityHash64WithSeeds,\n CityHash128WithSeed,\n ]\n for func in funcs:\n with self.assertRaises(TypeError):\n func([])",
"def caller():\n\n for func in funcs:\n func()",
"def get_sim_funs_for_blocking():\n return get_sim_funs()",
"def fcheck(*args, **kwargs)->None:\n pass",
"def _find_tainted_callers(self, key_addr, f_addr):\n\n p = self._current_p\n\n self.callsites = []\n # prepare the under-contrainted-based initial state\n # we do not allow untaint as we just want to see where the key string is leading to\n self._core_taint = coretaint.CoreTaint(p, interfunction_level=0, smart_call=False,\n follow_unsat=True,\n try_thumb=True,\n exit_on_decode_error=True, force_paths=True, allow_untaint=False,\n logger_obj=log)\n\n self._current_key_addr = key_addr\n s = self._prepare_state(key_addr, f_addr)\n summarized_f = self._prepare_function_summaries()\n\n self._core_taint.set_alarm(TIMEOUT_TAINT, n_tries=TIMEOUT_TRIES)\n\n try:\n self._core_taint.run(s, (), (), summarized_f=summarized_f, force_thumb=False,\n check_func=self._find_taint_callers, init_bss=False)\n except TimeOutException:\n log.warning(\"Timeout Triggered\")\n except Exception as e:\n log.warning(\"Exception: %s\" % str(e))\n\n self._core_taint.unset_alarm()\n callsites = []\n for cs in self.callsites:\n try:\n if self._current_cfg.get_any_node(cs[0]).function_address == f_addr and cs not in callsites:\n callsites.append(cs)\n except:\n pass\n\n return callsites",
"def _func_only(func):\n if inspect.isfunction(func):\n return\n else:\n raise Exception(\"Only functions can be tasks\")",
"def f_chains(self) -> List[Callable[[], Chain]]:\n return [delayed_run_chain() for _ in range(self.n_chains)]",
"def is_compatible(self, function, arguments):",
"def test_healthy_only_works_for_list_of_functions(self):\n actors = [Actor.remote(i) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n # Mark first and second actor as unhealthy.\n manager.set_actor_state(1, False)\n manager.set_actor_state(2, False)\n\n def f(id, _):\n return id\n\n func = [functools.partial(f, i) for i in range(4)]\n\n manager.foreach_actor_async(func, healthy_only=True)\n results = manager.fetch_ready_async_reqs(timeout_seconds=None)\n\n # Should get results back from calling actor 0 and 3.\n self.assertEqual([r.get() for r in results], [0, 3])\n\n manager.clear()",
"def credibility(*mass_functions):\n #Get the supports of each mass function:\n supports = []\n for mass_function in mass_functions:\n supports.append(mass_function.support(*[x for x in mass_functions if x != mass_function]))\n \n #Compute the credibility of each mass function:\n cred = []\n supportSum = sum(supports)\n for i in range(len(mass_functions)):\n cred.append(round(supports[i]/supportSum, 6))\n return cred",
"def verify_and_freeze(self):\n if self._driver is None and not self._strobers:\n raise ValueError(\n 'internal %s is not driven by anything' % self._name)\n if not self._users:\n raise ValueError(\n 'internal %s is never used' % self._name)\n self._frozen = True",
"def verify_and_freeze(self):\n for internal in self:\n internal.verify_and_freeze()",
"def check_trustlines(self):\n transactions = Transaction.objects.filter(\n status=Transaction.STATUS.pending_trust\n )\n server = settings.HORIZON_SERVER\n for transaction in transactions:\n try:\n account = (\n server.accounts().account_id(transaction.stellar_account).call()\n )\n except BaseHorizonError:\n logger.debug(\"could not load account using provided horizon URL\")\n continue\n try:\n balances = account[\"balances\"]\n except KeyError:\n logger.debug(\"horizon account response had no balances\")\n continue\n for balance in balances:\n try:\n asset_code = balance[\"asset_code\"]\n except KeyError:\n logger.debug(\"horizon balances had no asset_code\")\n continue\n if asset_code == transaction.asset.code:\n call_command(\"create_stellar_deposit\", transaction.id)",
"def check_integrity(self):\r\n nodes = graph.ops(self.inputs, self.outputs)\r\n if self.apply_nodes != nodes:\r\n missing = nodes.difference(self.apply_nodes)\r\n excess = self.apply_nodes.difference(nodes)\r\n raise Exception(\r\n \"The nodes are inappropriately cached. missing, in excess: \",\r\n missing, excess)\r\n for node in nodes:\r\n if node.fgraph is not self:\r\n raise Exception(\"Node should belong to the FunctionGraph.\",\r\n node)\r\n for i, variable in enumerate(node.inputs):\r\n if variable.fgraph is not self:\r\n raise Exception(\r\n \"Input of node should belong to the FunctionGraph.\",\r\n variable, (node, i))\r\n if (node, i) not in variable.clients:\r\n raise Exception(\"Inconsistent clients list.\",\r\n (node, i), variable.clients)\r\n variables = set(graph.variables(self.inputs, self.outputs))\r\n if set(self.variables) != variables:\r\n missing = variables.difference(self.variables)\r\n excess = self.variables.difference(variables)\r\n raise Exception(\r\n \"The variables are inappropriately cached. missing, in excess: \",\r\n missing, excess)\r\n for variable in variables:\r\n if (variable.owner is None and\r\n variable not in self.inputs and\r\n not isinstance(variable, graph.Constant)):\r\n raise Exception(\"Undeclared input.\", variable)\r\n if variable.fgraph is not self:\r\n raise Exception(\"Variable should belong to the FunctionGraph.\",\r\n variable)\r\n for node, i in variable.clients:\r\n if node == 'output':\r\n if self.outputs[i] is not variable:\r\n raise Exception(\"Inconsistent clients list.\",\r\n variable, self.outputs[i])\r\n continue\r\n if node not in nodes:\r\n raise Exception(\"Client not in FunctionGraph.\",\r\n variable, (node, i))\r\n if node.inputs[i] is not variable:\r\n raise Exception(\"Inconsistent clients list.\",\r\n variable, node.inputs[i])",
"def validate(host_calls):\n\n for name, host_call in host_calls.items():\n if not isinstance(host_call, (tuple, list)):\n raise ValueError('{} should be tuple or list'.format(name))\n if len(host_call) != 2:\n raise ValueError('{} should have two elements.'.format(name))\n if not callable(host_call[0]):\n raise TypeError('{}[0] should be callable.'.format(name))\n if not isinstance(host_call[1], (tuple, list, dict)):\n raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))\n\n if isinstance(host_call[1], (tuple, list)):\n fullargspec = tf_inspect.getfullargspec(host_call[0])\n fn_args = util.fn_args(host_call[0])\n # wrapped_hostcall_with_global_step uses varargs, so we allow that.\n if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):\n raise RuntimeError(\n 'In TPUEstimatorSpec.{}, length of tensors {} does not match '\n 'method args of the function, which takes {}.'.format(\n name, len(host_call[1]), len(fn_args)))",
"def test_require_at_least_one_and_several_provided(self):\n _func = at_least_one_of('arg1', 'arg2')(undecorated_func)\n self.assertEqual(_func('ahoy', 'there'), 'foo')\n self.assertEqual(_func(arg1='ahoy', arg2='there'), 'foo')\n self.assertEqual(_func('ahoy', arg2='there', arg3='matey'), 'foo')",
"def defer_to_call(*args, **kwargs):\n data = _func_or_data(*args, **kwargs)\n cls.scheme[func].validate(data)\n return data",
"def accounts():\n pass",
"def process_assertion_list(self, cls, functions):",
"def XXXcheck_function_dependencies(self, node, used_types):\n if node.cxx_template:\n # The templated type will raise an error.\n # XXX - Maybe dummy it out\n # XXX - process templated types\n return\n ast = node.ast\n result_typemap = ast.typemap\n # XXX - make sure it exists\n used_types[result_typemap.name] = result_typemap\n for arg in ast.declarator.params:\n ntypemap = arg.typemap\n if ntypemap.base == \"shadow\":\n used_types[ntypemap.name] = ntypemap",
"def _receptive_fn(self, nodes):\n raise Exception(\" not implemented in base model\")",
"def test_fn_called():\n l = [1, 2, 3, 4, 5]\n for fn in [s7.div, s7.mul, s7.add, \"abcd\", 1234]:\n try:\n f = s7.count_fn_called(fn=fn)\n for i in range(0, random.randint(2, 10)):\n f(*l)\n assert fn in s7.fn_called_dict.keys() and str(s7.fn_called_dict[fn]) in s7.check_all_fn_called(fn)\n except Exception as e:\n assert e.__class__.__name__ == TypeError.__name__"
] | [
"0.5650193",
"0.54443383",
"0.54156166",
"0.537168",
"0.5303468",
"0.51958776",
"0.515159",
"0.5115263",
"0.50573635",
"0.50471103",
"0.5032907",
"0.49691454",
"0.49658495",
"0.49336487",
"0.49330318",
"0.49018726",
"0.48831254",
"0.4877388",
"0.48731956",
"0.4864257",
"0.48636305",
"0.48628193",
"0.48558697",
"0.48461485",
"0.4844546",
"0.48065197",
"0.47980952",
"0.47795188",
"0.47757152",
"0.4750621"
] | 0.7283338 | 0 |
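The fixture in the row above pairs contract function names with call arguments; a natural companion test iterates over those pairs and asserts that every call reverts while the trustline is frozen. A hedged sketch of such a test, assuming the pytest fixtures named below are wired up as in the surrounding rows (their definitions are not shown in full here):

```python
# Hypothetical companion test: call each (function_name, args) pair from accounts[0] on the
# frozen network and expect the transaction to fail. Fixture wiring is assumed, not shown.
import pytest
import eth_tester.exceptions

def test_frozen_trustline_functions_fail(
    currency_network_contract_with_frozen_trustline, accounts, frozen_functions_and_args
):
    network = currency_network_contract_with_frozen_trustline
    for function_name, args in frozen_functions_and_args:
        with pytest.raises(eth_tester.exceptions.TransactionFailed):
            getattr(network.functions, function_name)(*args).transact(
                {"from": accounts[0]}
            )
```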
The trustline between accounts 0 and 1 is frozen; tests that it cannot be used in a mediated transfer | def test_mediate_transfer_fails_if_intermediate_trustline_frozen(
currency_network_contract_with_frozen_trustline, accounts
):
network = currency_network_contract_with_frozen_trustline
path = [accounts[4], accounts[0], accounts[1], accounts[2]]
with pytest.raises(eth_tester.exceptions.TransactionFailed):
network.functions.transfer(10, 10, path, b"").transact({"from": accounts[4]}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_transfer_blocked(chain, token, shareholder1, boogieman):\n\n set_state(chain, token, canTransferFlag=False)\n with pytest.raises(ValueError):\n token.transact({\"from\": shareholder1}).transfer(boogieman, 4000)",
"def test_32(self):\n assert 'False' == Api.requestBlock('test-32')",
"def test_37(self):\n assert 'False' == Api.requestBlock('test-37')",
"def test_25(self):\n assert 'False' == Api.requestBlock('test-25')",
"def test_35(self):\n assert 'False' == Api.requestBlock('test-35')",
"def test_34(self):\n assert 'False' == Api.requestBlock('test-34')",
"def test_15(self):\n assert 'False' == Api.requestBlock('test-15')",
"def test_block_bad_state(self):\n pass",
"def test_transfer_bypass_token(chain, token, carrier, shareholder1, boogieman):\n\n set_state(chain, token, canTransferFlag=True)\n with pytest.raises(ValueError):\n # This call must always come from token contract\n carrier.transact().transfer(shareholder1, boogieman, True)",
"def test_16(self):\n assert 'False' == Api.requestBlock('test-16')",
"def test_13(self):\n assert 'False' == Api.requestBlock('test-13')",
"def test_33(self):\n assert 'False' == Api.requestBlock('test-33')",
"def test_reject_proposal_demand(self):\n pass",
"def violated(self) -> bool:\n ...",
"def test_21(self):\n assert 'False' == Api.requestBlock('test-21')",
"def test_isolate_amount(self):\n self.assertIsNotNone(isolate_amount)",
"def test_14(self):\n assert 'False' == Api.requestBlock('test-14')",
"def test_23(self):\n assert 'False' == Api.requestBlock('test-23')",
"def test_44(self):\n assert 'False' == Api.requestBlock('test-44')",
"def test_is_utility_enhancing(self):\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n ledger_id=\"off_chain\",\n info=self.info,\n tx_nonce=\"Transaction nonce\",\n )\n self.decision_maker.ownership_state._quantities_by_good_id = None\n assert self.decision_maker._is_utility_enhancing(tx_message)",
"def test_52(self):\n assert 'False' == Api.requestBlock('test-52')",
"def reject_fairness(experiment: List[bool]) -> bool:\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 469 or num_heads > 531",
"def test_27(self):\n assert 'False' == Api.requestBlock('test-27')",
"def test_26(self):\n assert 'False' == Api.requestBlock('test-26')",
"def test_43(self):\n assert 'False' == Api.requestBlock('test-43')",
"def test_28(self):\n assert 'False' == Api.requestBlock('test-28')",
"def test_46(self):\n assert 'False' == Api.requestBlock('test-46')",
"def test_delay_by_proof(self):\n node, other = self.create_nodes(2)\n node.send_identity(other)\n\n # permit NODE\n proof_msg = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"permit\"),\n (node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"authorize\")])\n\n # NODE creates message\n tmessage = node.create_protected_full_sync_text(\"Protected message\", 42)\n other.give_message(tmessage, node)\n\n # must NOT have been stored in the database\n other.assert_not_stored(tmessage)\n\n # OTHER sends dispersy-missing-proof to NODE\n _, message = node.receive_message(names=[u\"dispersy-missing-proof\"]).next()\n self.assertEqual(message.payload.member.public_key, node.my_member.public_key)\n self.assertEqual(message.payload.global_time, 42)\n\n # NODE provides proof\n other.give_message(proof_msg, node)\n\n # must have been stored in the database\n other.assert_is_stored(tmessage)",
"def test_24(self):\n assert 'False' == Api.requestBlock('test-24')",
"def test_unsuccessful_verification(self):\n for i in (-4, -3, 3, 4):\n description = \"TOTP verified for `i={0}`\".format(i)\n calculated = self.algorithm.calculate(self.device.secret, drift=i)\n confirmed = self.relate.verify(calculated, save=False)\n\n self.assertFalse(confirmed, description)\n\n self.relate.confirm = False"
] | [
"0.637659",
"0.6006959",
"0.59612626",
"0.59547615",
"0.5950883",
"0.59490114",
"0.59403247",
"0.592359",
"0.59158957",
"0.58958477",
"0.5869624",
"0.58487654",
"0.5818297",
"0.58152175",
"0.58116984",
"0.58068",
"0.5805351",
"0.57946134",
"0.5792743",
"0.5792591",
"0.5785922",
"0.5784385",
"0.5766951",
"0.5761152",
"0.57584333",
"0.5756707",
"0.57556605",
"0.5755629",
"0.5749048",
"0.5746935"
] | 0.6220759 | 1 |
Sets the build_types of this ProblemScope. | def build_types(self, build_types):
self._build_types = build_types | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_type(self, build_type):\n\n self._build_type = build_type",
"def scm_types(self, scm_types):\n\n self._scm_types = scm_types",
"def types(self, types):\n\n self._types = types",
"def types(self, types: List[str]):\n if types is None:\n raise ValueError(\"Invalid value for `types`, must not be `None`\")\n\n self._types = types",
"def __init__(self, project=None, build_types=None, build_type=None, teamcity=None): # noqa: E501 # noqa: E501\n\n self._project = None\n self._build_types = None\n self._build_type = None\n self.discriminator = None\n\n if project is not None:\n self.project = project\n if build_types is not None:\n self.build_types = build_types\n if build_type is not None:\n self.build_type = build_type\n super(ProblemScope, self).__init__(teamcity=teamcity)",
"def build_options(self, build_options):\n\n self._build_options = build_options",
"def concurrent_builds(self, concurrent_builds):\n\n self._concurrent_builds = concurrent_builds",
"def define_containers(self, *types: ContainerType) -> None:\n\n for container_type in types:\n self.containers[container_type] = self.onefuzz.utils.build_container_name(\n container_type=container_type,\n project=self.project,\n name=self.name,\n build=self.build,\n platform=self.platform,\n )",
"def product_types(self, product_types):\n\n self._product_types = product_types",
"def builds(self):\n builds = [b for b in self.statuses() if b[\"type\"] == \"build\"]\n for build in builds:\n yield Build(build, **self._new_session_args)\n\n return",
"def build(self, build):\n\n self._build = build",
"def case_rebuild_types(cls):\n return functools.reduce(lambda x, y: x | y, [\n cls.TYPE_REBUILD_FORM_ARCHIVED,\n cls.TYPE_REBUILD_FORM_EDIT,\n cls.TYPE_REBUILD_USER_ARCHIVED,\n cls.TYPE_REBUILD_USER_REQUESTED,\n cls.TYPE_REBUILD_WITH_REASON,\n cls.TYPE_REBUILD_FORM_REPROCESS,\n ])",
"def build_type(self) -> Optional[pulumi.Input['BuildTypeArgs']]:\n return pulumi.get(self, \"build_type\")",
"def scheduled_builds(self, scheduled_builds):\n\n self._scheduled_builds = scheduled_builds",
"def item_types(self, item_types):\n\n self._item_types = item_types",
"def transmission_types(self, transmission_types):\n\n self._transmission_types = transmission_types",
"def type_ids(self, type_ids):\n\n self._type_ids = type_ids",
"def setEntityTypes(self, value):\n return self._set(entityTypes=value)",
"def period_types(self, period_types):\n\n self._period_types = period_types",
"def resource_types(self, resource_types: ConfigNodePropertyArray):\n\n self._resource_types = resource_types",
"def build_type(self):\n return self._build_type",
"def set_build(self, build):\n self.build = build\n if not self.record:\n return\n self.mdb.results.update({'_id':self.result_id}, \n {'$set':{'build':build}})",
"def build_type(self) -> pulumi.Output['outputs.BuildTypeResponse']:\n return pulumi.get(self, \"build_type\")",
"def setBuildFromTokens(self, b):\n return self._set(buildFromTokens=b)",
"def setBuildFromTokens(self, b):\n return self._set(buildFromTokens=b)",
"def policy_types(self, policy_types):\n\n self._policy_types = policy_types",
"def build_number(self, build_number):\n\n self._build_number = build_number",
"def scsi_adapter_types(self, scsi_adapter_types):\n\n self._scsi_adapter_types = scsi_adapter_types",
"def _set_gal_types(self):\n _gal_type_list = []\n for component_model in self.model_dictionary.values():\n _gal_type_list.append(component_model.gal_type)\n self.gal_types = set(list(_gal_type_list))",
"def UpdateBuilds(builds):\n\n # The build data file records the last build number for which we\n # generated a report. When we generate the next report, we read\n # this data and increment it to get the new data; when we finish\n # generating the reports, we write the updated values into this file.\n # NOTE: One side effect of doing this at the end: If the script\n # fails in the middle of generating a report, this data does not get\n # updated.\n with open(BUILD_DATA_FILE, 'w') as fp:\n gcc_max = 0\n llvm_max = 0\n for b in builds:\n if b[0] == GCC_ROTATING_BUILDER:\n gcc_max = max(gcc_max, b[1])\n elif b[0] == LLVM_ROTATING_BUILDER:\n llvm_max = max(llvm_max, b[1])\n else:\n fp.write('%s,%d\\n' % (b[0], b[1]))\n if gcc_max > 0:\n fp.write('%s,%d\\n' % (GCC_ROTATING_BUILDER, gcc_max))\n if llvm_max > 0:\n fp.write('%s,%d\\n' % (LLVM_ROTATING_BUILDER, llvm_max))"
] | [
"0.65033895",
"0.6066945",
"0.5788191",
"0.5584121",
"0.5559509",
"0.52263755",
"0.50761986",
"0.4917929",
"0.49125266",
"0.48962516",
"0.4873129",
"0.47903404",
"0.4772615",
"0.4762409",
"0.47446603",
"0.4708357",
"0.4683082",
"0.46532643",
"0.46285158",
"0.45791152",
"0.456799",
"0.4559719",
"0.45451015",
"0.45299143",
"0.45299143",
"0.45223323",
"0.4505289",
"0.44901434",
"0.4486134",
"0.44787344"
] | 0.7905692 | 0 |
Sets the build_type of this ProblemScope. | def build_type(self, build_type):
self._build_type = build_type | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_types(self, build_types):\n\n self._build_types = build_types",
"def build_type(self) -> Optional[pulumi.Input['BuildTypeArgs']]:\n return pulumi.get(self, \"build_type\")",
"def __init__(self, project=None, build_types=None, build_type=None, teamcity=None): # noqa: E501 # noqa: E501\n\n self._project = None\n self._build_types = None\n self._build_type = None\n self.discriminator = None\n\n if project is not None:\n self.project = project\n if build_types is not None:\n self.build_types = build_types\n if build_type is not None:\n self.build_type = build_type\n super(ProblemScope, self).__init__(teamcity=teamcity)",
"def build_type(self):\n return self._build_type",
"def build_type(self) -> pulumi.Output['outputs.BuildTypeResponse']:\n return pulumi.get(self, \"build_type\")",
"def build(self, build):\n\n self._build = build",
"def setHgType(self, hgTypeToSet):\n self.hgType = hgTypeToSet",
"def get_build_type(self):\n build_type_exports = self.export.findall('build_type')\n if len(build_type_exports) == 1:\n return build_type_exports[0].text\n raise InvalidPackage('Only one <build_type> element is permitted.')",
"def type(self, type):\n allowed_values = [\"WORKSPACE\", \"PLAN\", \"FLOW\", \"ACTION\", \"CONNECTION\", \"RESOURCE\", \"ENGINE\", \"CLUSTER\", \"FOLDER\"] # noqa: E501\n if type not in allowed_values:\n raise ValueError(\n \"Invalid value for `type` ({0}), must be one of {1}\" # noqa: E501\n .format(type, allowed_values)\n )\n\n self._type = type",
"def build_number(self, build_number):\n\n self._build_number = build_number",
"def type(self, type):\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type",
"def type(self, type):\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type",
"def type(self, type):\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type",
"def type(self, type):\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type",
"def job_type(self, job_type):\n self._job_type = job_type",
"def get_building_type(self): # -> BuildingType\n return BuildingType.PRESTIGE",
"def platform_type(self, platform_type):\n self._platform_type = platform_type",
"def set_type(self, type):\n self.type = type",
"def set_type(self, type):\n self.type = type",
"def set_type(self, type):\n self._type = type",
"def setHgType(self, hgTypeToSet):\n self.huntGroup.setHgType(hgTypeToSet)",
"def grid_type(self, value):\n # may want a check on value\n self._grid_type = value",
"def set_type(self, type: int):\r\n self.type = type\r\n self.canvas.itemconfig(self.item, image=self._get_image())",
"def set_build(self, build):\n self.build = build\n if not self.record:\n return\n self.mdb.results.update({'_id':self.result_id}, \n {'$set':{'build':build}})",
"def type(self, type: str):\n\n self._type = type",
"def change_type(self, change_type):\n\n self._change_type = change_type",
"def setDistributionType(self, distribution_type):\n self._distribution_type = distribution_type",
"def set_execution_type(self, type):\n self.execution_type = type",
"def SetType(self, ct_type):\r\n\r\n self._type = ct_type",
"def type(self, type):\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n allowed_values = [\"date\", \"enumeration\", \"number\", \"string\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and type not in allowed_values: # noqa: E501\n raise ValueError(\"Invalid value for `type` ({0}), must be one of {1}\".format(type, allowed_values)) # noqa: E501\n\n self._type = type"
] | [
"0.625394",
"0.59458286",
"0.5855856",
"0.577268",
"0.5698405",
"0.5533615",
"0.5451951",
"0.54216546",
"0.5392611",
"0.53884697",
"0.5381613",
"0.5381613",
"0.5381613",
"0.5381613",
"0.53579956",
"0.53508645",
"0.53253186",
"0.5323894",
"0.5323894",
"0.5235474",
"0.5209653",
"0.5164763",
"0.5124121",
"0.51085544",
"0.5062014",
"0.50534534",
"0.50437105",
"0.5038259",
"0.5036451",
"0.5029612"
] | 0.7677981 | 0 |
A weighted version of keras.objectives.categorical_crossentropy for keras (2.0.6). This lets you apply a weight to unbalanced classes. | def weighted_categorical_crossentropy(weights):
weights = K.variable(weights)
def loss(y_true, y_pred):
# scale predictions so that the class probas of each sample sum to 1
y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
# clip to prevent NaN's and Inf's
y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
# calc
loss = y_true * K.log(y_pred) * weights
loss = -K.sum(loss, -1)
return loss
return loss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def weighted_categorical_crossentropy(weights):\n weights = K.variable(weights)\n\n def loss(y_true, y_pred):\n y_hat = y_pred / K.sum(y_pred, axis=-1, keepdims=True)\n y_hat = K.clip(y_hat, K.epsilon(), 1 - K.epsilon())\n loss = y_true * K.log(y_hat) * weights\n loss = - K.sum(loss) / K.cast(K.shape(y_true)[0] * K.shape(y_true)[1] * K.shape(y_true)[2], 'float')\n return loss\n\n return loss",
"def weighted_categorical_crossentropy(y_true, y_pred):\n # Scale preds so that the class probas of each sample sum to 1\n y_pred /= K.sum(y_pred, axis=-1, keepdims=True) # (b, H, W, Q)\n\n # Avoid numerical instability with epsilon clipping\n y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon()) # (b, H, W, Q)\n\n # Calculate categorical cross entropy\n cross_entropy = tf.multiply(y_true, K.log(y_pred)) # (b, H, W, Q)\n cross_entropy = tf.reduce_sum(cross_entropy, axis=3) # (b, H, W)\n\n # Get weights\n weights = get_weights(y_true, prior_probs, params) # (b, H, W)\n\n # Calculate weighted categorical cross entropy\n weighted_cross_entropy = tf.multiply(weights, cross_entropy) #(b, H, W)\n #weighted_cross_entropy = tf.reduce_sum(weighted_cross_entropy) # ()\n weighted_cross_entropy = - weighted_cross_entropy # ()\n\n return weighted_cross_entropy",
"def weighted_categorical_crossentropy_wrapper(prior_probs, params):\n\n def weighted_categorical_crossentropy(y_true, y_pred):\n \"\"\"Calculates the weighted categorical cross entropy.\n\n Input:\n y_true: Converted ground truth color Z = H_gt(Y)^(-1) with\n shape (b, H, W, Q)\n y_pred: Prediction Z_hat with shape (b, H, W, Q)\n\n Return:\n weighted_cross_entropy: Weighted categorical cross entropy (value)\n\n Note that the following code is based on the function\n categorical_crossentropy defined in keras/keras/backend/cntk_backend.py\n (see github: keras-team/keras).\n \"\"\"\n # Scale preds so that the class probas of each sample sum to 1\n y_pred /= K.sum(y_pred, axis=-1, keepdims=True) # (b, H, W, Q)\n\n # Avoid numerical instability with epsilon clipping\n y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon()) # (b, H, W, Q)\n\n # Calculate categorical cross entropy\n cross_entropy = tf.multiply(y_true, K.log(y_pred)) # (b, H, W, Q)\n cross_entropy = tf.reduce_sum(cross_entropy, axis=3) # (b, H, W)\n\n # Get weights\n weights = get_weights(y_true, prior_probs, params) # (b, H, W)\n\n # Calculate weighted categorical cross entropy\n weighted_cross_entropy = tf.multiply(weights, cross_entropy) #(b, H, W)\n #weighted_cross_entropy = tf.reduce_sum(weighted_cross_entropy) # ()\n weighted_cross_entropy = - weighted_cross_entropy # ()\n\n return weighted_cross_entropy\n\n return weighted_categorical_crossentropy",
"def weighted_bce(y_hat, target, labels, weight_multiplier):\n weight = labels.clone()\n weight.requires_grad = False\n weight[weight < 1] = 2\n weight[weight == 3] = 4\n weight = 2 / weight\n\n # apply mask\n weight = weight * weight_multiplier\n return F.binary_cross_entropy(y_hat, target, weight)",
"def weighted_sigmoid_binary_crossentropy(output, target, weight=1.):\n l = (1. + (weight - 1. ) * target)\n loss = (1. - target ) * output + l * ( T.log(1. + T.exp( - T.abs_(output)))\n + T.maximum(-output, 0))\n return loss",
"def categorical_crossentropy(predictions, targets):\n return theano.tensor.nnet.categorical_crossentropy(predictions, targets)",
"def categorical_crossentropy(predictions, targets):\n return theano.tensor.nnet.categorical_crossentropy(predictions, targets)",
"def weighted_binary_cross_entropy_from_probs(target,\n output,\n positive_class_weight=None,\n negative_class_weight=None):\n epsilon = tf.constant(_EPSILON, dtype=output.dtype.base_dtype)\n output = tf.clip_by_value(output, epsilon, 1. - epsilon)\n if positive_class_weight is None:\n positive_class_weight = tf.constant(1.0, dtype=output.dtype.base_dtype)\n else:\n positive_class_weight = tf.constant(\n positive_class_weight, dtype=output.dtype.base_dtype)\n\n if negative_class_weight is None:\n negative_class_weight = tf.constant(1.0, dtype=output.dtype.base_dtype)\n else:\n negative_class_weight = tf.constant(\n negative_class_weight, dtype=output.dtype.base_dtype)\n\n # Compute cross entropy from probabilities.\n target = tf.cast(target, output.dtype.base_dtype)\n bce = positive_class_weight * target * tf.math.log(output + epsilon)\n bce += negative_class_weight * (1 - target) * tf.math.log(1 - output +\n epsilon)\n return -bce",
"def calculate_weights(y_train: np.ndarray) -> np.ndarray:\n\n weight_class = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train)\n return weight_class",
"def classweighted_mortality_loss(class_weights):\n\n def tmp_mortality_loss(y_true, y_pred):\n sample_weights = (1 - y_true) * class_weights[0] + y_true * class_weights[1]\n bce = K.binary_crossentropy(y_true, y_pred)\n return K.mean(sample_weights * bce, axis=-1)\n\n return tmp_mortality_loss",
"def make_weights_for_balanced_classes(self):\n\n count = [0] * self.get_num_classes()\n\n # label = self.class_map_dict[self.meta_data.loc[image_id]['dx']]\n # labels = [self.class_map_dict[l] for l in self.get_labels()]\n\n labels = self.get_labels()\n\n # Count how many instances there are for each class\n for l in labels:\n count[l] += 1\n\n weight_per_class = [0.] * self.get_num_classes()\n\n N = float(sum(count))\n\n # Assign a weight which is inversely proportional to class frequency\n for i in range(self.get_num_classes()):\n weight_per_class[i] = N/float(count[i])\n\n # Save results for debugging purposes\n self._weight_per_class = weight_per_class\n\n # Now assign a weight to each data point\n weight = [0] * len(labels)\n\n for idx, val in enumerate(labels):\n weight[idx] = weight_per_class[val]\n\n return weight",
"def weighted_binary_crossentropy(target, output):\n # transform back to logits\n _epsilon = tfb._to_tensor(tfb.epsilon(), output.dtype.base_dtype)\n output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)\n output = tf.log(output / (1 - output))\n # compute weighted loss\n loss = tf.nn.weighted_cross_entropy_with_logits(targets=target,\n logits=output,\n pos_weight=POS_WEIGHT)\n return tf.reduce_mean(loss, axis=-1)",
"def softmax_categorical_crossentropy(y_pred, y_true):\n with tf.name_scope(\"SoftmaxCrossentropy\"):\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_pred,\n y_true))",
"def loss(labels, logits):\n return sparse_categorical_crossentropy(labels, logits, from_logits=True)",
"def calculate_class_weights(y_train, label_encoder):\n if label_encoder.classes_.size != 2:\n y_train = label_encoder.inverse_transform(np.argmax(y_train, axis=1))\n\n # Balanced class weights\n weights = class_weight.compute_class_weight(\"balanced\",\n np.unique(y_train),\n y_train)\n class_weights = dict(enumerate(weights))\n\n # Manual class weights for CBIS-DDSM\n #class_weights = {0: 1.0, 1:1.5}\n\n # No class weights\n #class_weights = None\n\n if config.verbose_mode:\n print(\"Class weights: {}\".format(str(class_weights)))\n\n return class_weights",
"def weighted_softmax_cross_entropy(\n logits,\n labels,\n label_smoothing = 0.0,\n weights = 1.0,\n loss_reduction = LossReductionType\n .SUM_BY_NONZERO_WEIGHTS,\n background_weight = 1.0,\n **kwargs):\n del kwargs\n check_shape_equal(logits, labels)\n\n labels = labels.astype(logits.dtype)\n if label_smoothing > 0:\n num_classes = labels.shape[-1]\n smooth_weight = label_smoothing / num_classes\n smooth_weight = jnp.array(smooth_weight, dtype=logits.dtype)\n labels = (1. - label_smoothing) * labels + smooth_weight\n\n logits = jax.nn.log_softmax(logits)\n loss = -labels * logits\n\n # Apply background class weights\n class_weights = np.ones(loss.shape)\n class_weights[Ellipsis, :1] = background_weight # Background is class 0.\n loss = loss * jnp.array(class_weights)\n\n loss = jnp.sum(loss, axis=-1)\n return compute_weighted_loss(loss, weights, logits.dtype, loss_reduction)",
"def weighted_cross_entropy_loss(logits, labels, weights=None, label_smoothing=0.0, dtype=jnp.float32):\n if logits.ndim != labels.ndim + 1:\n raise ValueError(f'Incorrect shapes. Got shape {logits.shape} logits and {labels.shape} targets')\n num_classes = logits.shape[-1]\n off_value = label_smoothing / num_classes\n on_value = 1. - label_smoothing + off_value\n soft_targets = onehot(labels, num_classes, on_value=on_value, off_value=off_value, dtype=dtype)\n logp = jax.nn.log_softmax(logits.astype(dtype))\n loss = jnp.sum(logp * soft_targets, axis=-1)\n if weights is not None:\n loss = loss * weights\n return -loss.mean()",
"def Weighted_Cross_Entropy(y_true, y_pred, eps = 1e-10):\n y_pred = tf.cast(y_pred, 'float64')\n y_true = tf.cast(y_true, 'float64')\n # deduce weights based on true pixel value\n class_weights = weights * y_true\n # compute your (unweighted) softmax cross entropy loss\n unweighted_losses = y_true*tf.math.log(y_pred + eps)\n ##print(unweighted_losses.dtype, weights.dtype)\n weighted_losses = unweighted_losses * class_weights\n # reduce the result to get your final loss\n loss = -tf.reduce_sum(weighted_losses)\n return loss",
"def weighted_cross_entropy_loss(labels, log_dampened=False):\r\n if isinstance(labels, list):\r\n all_labels = np.array(labels)\r\n _, weights = np.unique(labels, return_counts=True)\r\n weights = weights / float(np.sum(weights))\r\n weights = np.sum(weights) / weights\r\n if log_dampened:\r\n weights = 1 + np.log(weights)\r\n loss = nn.CrossEntropyLoss(\r\n weight=torch.from_numpy(weights).type('torch.FloatTensor')\r\n )\r\n return loss",
"def my_weighted_softmax_cross_entropy(preds, labels, weights):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)\n # loss = tf.nn.weighted_cross_entropy_with_logits(logits=preds, targets=labels, pos_weight=0.1)\n # norm_wts = tf.norm(weights, axis=0)\n norm_wts = weights/tf.reduce_mean(weights)\n return tf.reduce_mean(tf.math.multiply(loss, norm_wts))",
"def pixel_weighted_cross_entropy(y_true, y_pred, reduction=True):\n y_bin_true = tf.cast(y_true > 0, y_true.dtype)\n loss_val = keras.losses.binary_crossentropy(y_bin_true, y_pred)\n weights = tf.where(y_true > 0, y_true, 1.0)\n loss_val = tf.convert_to_tensor(tf.squeeze(weights), np.float32) * loss_val\n if reduction == True:\n return K.mean(loss_val)\n elif reduction == False:\n return loss_val",
"def crossentropy_loss(y_true, y_pred):\n ce = tf.keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True) \n return ce",
"def weighted_cross_entropy_loss(outputs, labels, sample_weight=None, **kwargs):\n if sample_weight is None:\n return F.cross_entropy(outputs, labels, reduction=\"mean\")\n else:\n result = F.cross_entropy(outputs, labels, reduction=\"none\", **kwargs)\n assert result.size()[0] == sample_weight.size()[0]\n return (sample_weight * result).sum() / sample_weight.sum()",
"def cross_entropy(input, target, weight):\n loss = 0\n batch_size = target.data.shape[1]\n target = target.max(2)[1]\n input = input.transpose(0, 1)\n target = target.transpose(0, 1)\n weight = weight.transpose(0, 1)\n total_weight = torch.sum(weight, dim=1).cpu().data.numpy()\n for i in range(batch_size):\n partial_loss = F.cross_entropy(input[i], target[i], size_average=False, reduce=False)\n partial_loss = torch.mul(partial_loss, weight[i])\n loss = loss + torch.sum(partial_loss) / float(total_weight[i])\n return loss / batch_size",
"def prepare_class_weight_map(y_true, weights):\n y_true_one_hot = tf.one_hot(y_true, tf.shape(weights)[0])\n weight_map = tf.tensordot(y_true_one_hot, weights, axes=1)\n return weight_map",
"def binary_cross_entropy(y_true, y_preds):\n return np.sum(y_true * np.log(y_preds) + (1 - y_true) * np.log(1 - y_preds))",
"def calculate_class_weights(label_data):\n neg, pos = np.bincount(label_data)\n weight_for_0 = 1 / neg\n weight_for_1 = 1 / pos\n return {0: weight_for_0, 1: weight_for_1}",
"def calculate_class_weights(labels):\n class_counts = sorted(Counter(labels).items())\n num_items = [x[1] for x in class_counts]\n weights = [min(num_items)/x for x in num_items]\n\n return torch.tensor(weights)",
"def _ragged_tensor_categorical_crossentropy(y_true,\n y_pred,\n from_logits=False,\n label_smoothing=0,\n axis=-1):\n fn = functools.partial(\n categorical_crossentropy,\n from_logits=from_logits,\n label_smoothing=label_smoothing,\n axis=axis)\n return _ragged_tensor_apply_loss(fn, y_true, y_pred)",
"def weighted_cross_entropy_with_logits(pos_weight=1., label_smoothing=0):\n\n # @tf.function\n def loss(y_true, y_pred):\n labels = tf.cast(y_true, tf.float32)\n logits = tf.cast(y_pred, tf.float32)\n\n if label_smoothing > 0:\n # label smoothing between binary classes (Szegedy et al. 2015)\n labels *= 1.0 - label_smoothing\n labels += 0.5 * label_smoothing\n\n return tf.reduce_mean(\n tf.nn.weighted_cross_entropy_with_logits(\n labels=labels, logits=logits, pos_weight=pos_weight),\n axis=-1)\n\n return loss"
] | [
"0.8203031",
"0.79314363",
"0.7234755",
"0.71510255",
"0.702699",
"0.70055",
"0.70055",
"0.69127864",
"0.6847676",
"0.67895746",
"0.6735132",
"0.66833335",
"0.6594962",
"0.6553082",
"0.6502841",
"0.6399763",
"0.63869053",
"0.63820964",
"0.63813084",
"0.6356557",
"0.6352567",
"0.6342557",
"0.63228136",
"0.631922",
"0.6315139",
"0.6307284",
"0.62795186",
"0.6274954",
"0.6233556",
"0.6231411"
] | 0.842637 | 0 |
Compare the current token type with the passed token type; if they match, "eat" the current token and assign the next token to self.current_token, otherwise raise an exception. | def eat(self, token_type):
if self.current_token.type == token_type:
self.current_token = self.lexer.get_next_token()
# print(self.current_token)
else:
self.error() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def match_type(self, token_type):\n if isinstance(self.cursor(), token_type):\n token = self.cursor()\n self.pos += 1\n else:\n raise ParseError(\"Expected {!s}.\".format(token_type))\n return token",
"def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None:\n if token.is_atx_heading:\n atx_token = cast(AtxHeadingMarkdownToken, token)\n self.__handle_atx_heading(context, atx_token)\n elif token.is_setext_heading:\n setext_token = cast(SetextHeadingMarkdownToken, token)\n self.__handle_setext_heading(setext_token)\n elif token.is_text:\n text_token = cast(TextMarkdownToken, token)\n self.__handle_text(text_token)\n elif token.is_setext_heading_end:\n end_token = cast(EndMarkdownToken, token)\n self.__handle_setext_heading_end(context, end_token)",
"def maybe_advance(self, expected_type):\n token = self._get_token()\n if token and token.type == expected_type:\n self.pos = token.pos\n return token.value\n return None",
"def next_token(self, context, token):",
"def _consume(self, token_type, msg):\n if self._check(token_type):\n return self._advance()\n\n raise self._error(self._peek(), msg)",
"def consume_if(self, tok_type: str) -> Token:\n curr = self.current\n if curr.tok_type != tok_type:\n raise ExprSyntaxError\n self.pos += 1\n return curr",
"def _check(self, token_type):\n if self._is_at_end():\n return False\n\n return self._peek().token_type == token_type",
"def next_token(self) -> T.Optional[Token]:\n if self.has_finished():\n return None\n token_type = None\n token_chars = []\n if is_number_char(self.current):\n token_type = \"N\"\n while not self.has_finished() and is_number_char(self.current):\n token_chars.append(self.consume())\n elif is_char_token(self.current):\n if self.current in [\"(\", \")\"]:\n token_type = self.current\n elif self.current in [\"+\", \"-\"]:\n token_type = \"S\"\n elif self.current in [\"*\", \"/\"]:\n token_type = \"M\"\n else:\n raise ExprSyntaxError\n token_chars.append(self.consume())\n elif self.current.isspace():\n self.consume()\n return self.next_token()\n else:\n raise UnexpectedChar\n return Token(token_type, \"\".join(token_chars))",
"def advance(self, expected_type):\n token = self._get_token()\n self._assert_token_type(token, expected_type)\n self.pos = token.pos\n return token.value",
"def match_value(self, token_type, token_value):\n if isinstance(self.cursor(), token_type) and self.cursor().token == token_value:\n token = self.cursor()\n self.pos += 1\n else:\n raise ParseError(\"Expected {!s}.\".format(token_value))\n return token",
"def next_token(self, context, token):\n if token.is_unordered_list_start:\n self.__handle_unordered_list_start(context, token)\n elif token.is_ordered_list_start:\n self.__handle_ordered_list_start(token)\n elif token.is_unordered_list_end or token.is_ordered_list_end:\n self.__handle_list_end(context, token)\n elif token.is_new_list_item:\n self.__handle_list_item(context, token)",
"def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None:\n if token.is_list_start:\n self.__list_stack.append(token)\n if token.is_ordered_list_start:\n list_style, last_known_number = self.__match_first_item(context, token)\n self.__ordered_list_stack.append((list_style, last_known_number))\n elif token.is_list_end:\n del self.__list_stack[-1]\n if token.is_ordered_list_end:\n del self.__ordered_list_stack[-1]\n elif token.is_new_list_item and self.__list_stack[-1].is_ordered_list_start:\n list_style, last_known_number = self.__ordered_list_stack[-1]\n list_style, last_known_number = self.__match_non_first_items(\n context, token, list_style, last_known_number\n )\n self.__ordered_list_stack[-1] = (list_style, last_known_number)",
"def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None:\n if token.is_atx_heading:\n atx_token = cast(AtxHeadingMarkdownToken, token)\n if not atx_token.remove_trailing_count:\n self.__atx_heading_token = token\n elif token.is_paragraph_end:\n self.__atx_heading_token = None\n elif token.is_text:\n text_token = cast(TextMarkdownToken, token)\n resolved_extracted_whitespace = ParserHelper.remove_all_from_text(\n text_token.extracted_whitespace\n )\n if self.__atx_heading_token and len(resolved_extracted_whitespace) > 1:\n self.report_next_token_error(context, self.__atx_heading_token)",
"def _match(self, *token_types):\n for token in token_types:\n if self._check(token):\n self._advance()\n return True\n\n return False",
"def __next__(self):\n if self.gen is None:\n self.gen = self.token_generator()\n\n tok = next(self.gen)\n while tok.type in self.IGNORED_TOKENS:\n tok = next(self.gen)\n return tok",
"def next_token(self, tok, include_extra=False):\n # type: (Token, bool) -> Token\n i = tok.index + 1\n if not include_extra:\n while is_non_coding_token(self._tokens[i].type):\n i += 1\n return self._tokens[i]",
"def find_token(self, start_token, tok_type, tok_str=None, reverse=False):\n # type: (Token, int, Optional[str], bool) -> Token\n t = start_token\n advance = self.prev_token if reverse else self.next_token\n while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):\n t = advance(t, include_extra=True)\n return t",
"def _assert_token_type(self, token, expected_type):\n if token and token.type == expected_type:\n return\n # Skip whitespace to make the error message more useful.\n pos = self._skip_whitespace()\n raise CppParsingError(expected_type, self.body, pos, self.file_path,\n self.line_number)",
"def get_next_token(self):\n\t\t\n\t\tif self.pos > len(self.text)-1:\n\t\t\treturn Token(EOF, None)\n\t\t\t\n\t\tcurrent_char = self.text[self.pos]\n\t\t\n\t\tif current_char.isdigit() or current_char.isalpha():",
"def expect(self, expected_token):\n if isinstance(self.next(), expected_token):\n self.consume()\n else:\n raise ParserError(\n \"expected {expected} but found {found}.\".format(\n expected=expected_token,\n found=self.next()\n )\n )",
"def gettok(self):\n try:\n self.next = next(self.tokens)\n except StopIteration:\n self.next = None",
"def here(self, type):\n # Get the token ahead of the current index.\n possibleIndexEosToken = self.getCurrentToken().tokenIndex - 1\n ahead = self._input.get(possibleIndexEosToken)\n\n # Check if the token resides on the HIDDEN channel and if it is of the\n # provided type.\n return (ahead.channel == Lexer.HIDDEN) and (ahead.type == type)",
"def _add_token(self, token: _TokenT) -> None:\n grammar = self._pgen_grammar\n stack = self.stack\n # pyre-fixme[6]: Expected `_TokenTypeT` for 2nd param but got `TokenType`.\n transition = _token_to_transition(grammar, token.type, token.string)\n\n while True:\n try:\n plan = stack[-1].dfa.transitions[transition]\n break\n except KeyError:\n if stack[-1].dfa.is_final:\n try:\n self._pop()\n except PartialParserSyntaxError as ex:\n # Upconvert the PartialParserSyntaxError to a ParserSyntaxError\n # by backfilling the line/column information.\n raise ParserSyntaxError(\n ex.message,\n lines=self.lines,\n raw_line=token.start_pos[0],\n raw_column=token.start_pos[1],\n )\n except Exception as ex:\n # convert_nonterminal may fail due to a bug in our code. Try to\n # recover enough to at least tell us where in the file it\n # failed.\n raise ParserSyntaxError(\n f\"Internal error: {ex}\",\n lines=self.lines,\n raw_line=token.start_pos[0],\n raw_column=token.start_pos[1],\n )\n else:\n # We never broke out -- EOF is too soon -- Unfinished statement.\n #\n # BUG: The `expected_str` may not be complete because we already\n # popped the other possibilities off the stack at this point, but\n # it still seems useful to list some of the possibilities that we\n # could've expected.\n expected_str = get_expected_str(\n token, stack[-1].dfa.transitions.keys()\n )\n raise ParserSyntaxError(\n f\"Incomplete input. {expected_str}\",\n lines=self.lines,\n raw_line=token.start_pos[0],\n raw_column=token.start_pos[1],\n )\n except IndexError:\n # I don't think this will ever happen with Python's grammar, because if\n # there are any extra tokens at the end of the input, we'll instead\n # complain that we expected ENDMARKER.\n #\n # However, let's leave it just in case.\n expected_str = get_expected_str(token, EOFSentinel.EOF)\n raise ParserSyntaxError(\n f\"Too much input. {expected_str}\",\n lines=self.lines,\n raw_line=token.start_pos[0],\n raw_column=token.start_pos[1],\n )\n\n # Logically, `plan` is always defined, but pyre can't reasonably determine that.\n stack[-1].dfa = plan.next_dfa\n\n for push in plan.dfa_pushes:\n stack.append(StackNode(push))\n\n leaf = self.convert_terminal(token)\n stack[-1].nodes.append(leaf)",
"def match(self, token):\n\n if self.la == token:\n self.la, self.val = self.next_token()\n else:\n raise ParseError(\"found {} instead of {}\".format(self.la, token))",
"def get_next_token(self):\n text = self.text\n\n # is self.pos index past the end of the self.text ?\n # if so, then return EOF token because there is no more\n # input left to convert into tokens\n self.ignore_whitespaces()\n if self.pos > len(text) - 1:\n return Token(EOF, None)\n #### Logic for ignoring whitespaces and handling multiple\n #### digit input\n current_char = text[self.pos]\n\n if current_char == '+':\n token = Token(PLUS, current_char)\n self.pos += 1\n return token\n elif current_char == '-':\n token = Token(MINUS, current_char)\n self.pos += 1\n return token\n elif current_char == '*':\n token = Token(MULTIPLY, current_char)\n self.pos += 1\n return token\n elif current_char == '/':\n token = Token(DIVIDE, current_char)\n self.pos += 1\n return token\n\n value_str = \"\"\n while (current_char.isdigit() or current_char == '.'):\n value_str = value_str + current_char\n self.pos += 1\n if self.pos < len(text):\n current_char = text[self.pos]\n else:\n break\n\n if value_str.find('.') > -1:\n try:\n value = float(value_str)\n except ValueError:\n print \"Could not convert {value_str} to a float\".format(value_str)\n self.error()\n token = Token(FLOAT, value)\n return token\n elif value_str[0].isdigit():\n try:\n value = int(value_str)\n except ValueError:\n print \"Could not convert {value_str} to an Interger\".format(value_str)\n self.error()\n token = Token(INTEGER,value)\n return token\n\n\n ####\n \"\"\"\n # get a character at the position self.pos and decide\n # what token to create based on the single character\n current_char = text[self.pos]\n\n # if the character is a digit then convert it to\n # integer, create an INTEGER token, increment self.pos\n # index to point to the next character after the digit,\n # and return the INTEGER token\n if current_char.isdigit():\n token = Token(INTEGER, int(current_char))\n self.pos += 1\n return token\n\n if current_char == '+':\n token = Token(PLUS, current_char)\n self.pos += 1\n return token\n\n if current_char == '-':\n token = Token(MINUS, current_char)\n self.pos += 1\n return token\n\n if current_char == '*':\n token = Token(MULTIPLY, current_char)\n self.pos += 1\n return token\n\n if current_char == '/':\n token = Token(DIVIDE, current_char)\n self.pos += 1\n return token\n \"\"\"\n self.error()",
"def get_next_token(self):\n while self.current_char is not None:\n if self.current_char.isspace():\n self.skip_whitespace()\n continue\n if self.current_char.isdigit():\n return Tokenizer(INTEGER, self.integer())\n if self.current_char == '+':\n self.advance()\n return Tokenizer(Token.PLUS, '+')\n if self.current_char == '-':\n self.advance()\n return Tokenizer(Token.MINUS, '-')\n if self.current_char == '*':\n self.advance()\n return Tokenizer(Token.MULTIPLICATION, '*')\n if self.current_char == '/':\n self.advance()\n return Tokenizer(Token.DIVISION, '/')\n\n self.error()\n return Tokenizer(EOF, None)",
"def _get_token(self):\n self._skip()\n\n token = None\n # Checks single-quoted string.\n if self.current_char == \"'\":\n start_position = self.current_position\n while not (self.current_char != \"\\\\\" and self._peek() == \"'\"):\n self._next_char()\n if self.EOF:\n raise LexerError(\n start_position, f\"EOL while scanning string literal at position {start_position}\")\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.STRING, self.stream[start_position:self.current_position + 1])\n\n # Checks double-quoted string.\n elif self.current_char == '\"':\n start_position = self.current_position\n while not (self.current_char != \"\\\\\" and self._peek() == '\"'):\n self._next_char()\n if self.EOF:\n raise LexerError(\n start_position, f\"EOL while scanning string literal at position {start_position}\")\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.STRING, self.stream[start_position:self.current_position + 1])\n\n # Checks number begins with a digit.\n elif self.current_char.isdigit():\n start_position = self.current_position\n while self._peek().isdigit():\n self._next_char()\n if self._peek() == \".\":\n self._next_char()\n while self._peek().isdigit():\n self._next_char()\n if self._peek() in [\"d\", \"D\", \"f\", \"F\"]:\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.NUMBER, self.stream[start_position:self.current_position + 1])\n\n # Checks number begins with a dot.\n elif self.current_char == \".\":\n if self._peek().isdigit():\n start_position = self.current_position\n while self._peek().isdigit():\n self._next_char()\n if self._peek() in [\"d\", \"D\", \"f\", \"F\"]:\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.NUMBER, self.stream[start_position:self.current_position + 1])\n else:\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Separators(self.current_char).name, self.current_char)\n\n # Checks word begins with an alphabetic letter or an underscore.\n elif self.current_char.isalpha() or self.current_char == \"_\":\n start_position = self.current_position\n while True:\n if (self._peek() in [\" \", \"\\t\", \"\\r\", \"\\n\", \"\\0\"]\n or self._peek() in _token_names.SEPARATORS\n or self._peek() in _token_names.OPERATORS):\n break\n self._next_char()\n word = self.stream[start_position:self.current_position + 1]\n # Checks if word is a keyword.\n if word in _token_names.Keywords.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.Keywords(word).name, word)\n elif word in _token_names.KeywordsType.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.KeywordsType(word).name, word)\n elif word in _token_names.KeywordsAttribute.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.KeywordsAttribute(word).name, word)\n # Otherwise put it as identifier.\n else:\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.IDENTIFIER, word)\n\n # Checks if is a separator.\n elif self.current_char in 
_token_names.Separators.values():\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Separators(self.current_char).name, self.current_char)\n\n # Checks if is an operator.\n elif self.current_char in _token_names.Operators.values():\n last_position = self.current_position\n if self.current_char not in [\"&\", \"|\"] and self._peek() == \"=\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"+\" and self._peek() == \"+\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"-\" and self._peek() == \"-\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"&\" and self._peek() == \"&\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"|\" and self._peek() == \"|\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n else:\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Operators(self.current_char).name, self.current_char)\n\n # Checks if is EOF\n elif self.current_char == \"\\0\":\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.EOF, self.current_char)\n\n # Raise error if is an unknown token.\n else:\n raise LexerError(self.current_position)\n\n self._next_char()\n return token",
"def peek_for_token(self, ch, check_tok, yes_tok, no_tok):\n if self.peek_char() == check_tok:\n first = ch\n self.read_char()\n literal = first + self.char\n return Token(yes_tok, first + self.char)\n else:\n return Token(no_tok, ch)",
"def next_token(self, tokenum, value, srow, scol):\n\n # Make self.current reflect these values\n self.current.set(tokenum, value, srow, scol)\n\n # Determine indent_type based on this token\n if self.current.tokenum == INDENT and self.current.value:\n self.indent_type = self.current.value[0]\n\n # Only proceed if we shouldn't ignore this token\n if not self.ignore_token():\n # Determining if this token is whitespace\n self.determine_if_whitespace()\n\n # Determine if inside a container\n self.determine_inside_container()\n\n # Change indentation as necessary\n self.determine_indentation()\n\n # See if we are force inserting this token\n if self.forced_insert():\n return\n\n # If we have a newline after an inserted line, then we don't need to worry about semicolons\n if self.inserted_line and self.current.tokenum == NEWLINE:\n self.inserted_line = False\n\n # If we have a non space, non comment after an inserted line, then insert a semicolon\n if self.result and not self.is_space and self.inserted_line:\n if self.current.tokenum != COMMENT:\n self.result.append((OP, \";\"))\n self.inserted_line = False\n\n # Progress the tracker\n self.progress()\n\n # Add a newline if we just skipped a single\n if self.single and self.single.skipped:\n self.single.skipped = False\n self.result.append((NEWLINE, \"\\n\"))\n\n # Set after_space so next line knows if it is after space\n self.after_space = self.is_space",
"def advance(self) -> None:\n self.current_token = self.jack_file_tokens[self._token_idx]\n self._token_idx += 1"
] | [
"0.6593315",
"0.65260625",
"0.650497",
"0.64615226",
"0.6456286",
"0.634234",
"0.62530714",
"0.6209854",
"0.6142705",
"0.60957164",
"0.6057084",
"0.6043257",
"0.60196143",
"0.5970951",
"0.591296",
"0.5848229",
"0.57907677",
"0.57583374",
"0.5704527",
"0.5701983",
"0.5666284",
"0.56626767",
"0.5662294",
"0.5656541",
"0.5634419",
"0.56065947",
"0.55666673",
"0.5564335",
"0.5539288",
"0.550106"
] | 0.7741878 | 0 |
Eat all consecutive EOL tokens | def eat_EOL(self):
# print("Start eating EOL")
self.eat(EOL)
while self.current_token.type == EOL:
self.eat(EOL)
# print("Stop eating EOL") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def maybe_eol(self):\n if self.current == CR:\n self.next()\n if self.current == LF:\n self.next()\n elif self.current == LF:\n self.next()",
"def eol(self):\n if self.current not in EOL:\n self.on_parser_error(\"EOL expected\")\n self.maybe_eol()",
"def rl(size=None, eol=NEWLINE):\n ret = \"\"\n while True:\n x = self.comm.read()\n if x == eol:\n return ret\n ret = ret + x",
"def do_eof(self, line):\n print \"\"\n return True",
"def do_EOF(self, line):\n print(\"\")\n return True",
"def do_EOF(self, line):\n print()\n return True",
"def _advance_line(self):\n self.current_index += 1\n if self.current_index >= len(self.file):\n self.current_line = 'EOF'\n return\n self.current_line = self.file[self.current_index].strip()\n while self.current_line.startswith('#') or self.current_line == '':\n self.current_index += 1\n if self.current_index >= len(self.file):\n self.current_line = 'EOF'\n return\n self.current_line = self.file[self.current_index].strip()\n self._gobble_comments()",
"def rehydrate_hard_break(cls, next_token):\n return next_token.line_end",
"def nextLine(self) -> str:\n raise NotImplementedError",
"def do_EOF(self, line):\n return True",
"def do_EOF(self, line):\n return True",
"def do_EOF(self, line):\n return True",
"def raw_next_line() -> str:\n return input()",
"def is_eof(line):\n return line == \"\"",
"def _next_char(self):\n self.current_position += 1\n if self.current_position >= len(self.stream):\n self.current_char = \"\\0\"\n self.EOF = True\n else:\n self.current_char = self.stream[self.current_position]\n if self.current_char == \"\\n\":\n self.line_number += 1\n self.line_start_position = self.current_position",
"def advance(self):\n line = self.stream.readline()\n while line is not None:\n # Strip comments or empty spaces\n line = re.sub('//.*', '', line).strip()\n\n # Avoid comments or empty lines\n if line != '':\n break\n\n line = self.stream.readline()\n\n if line is None:\n print \"No more commands.\"\n return\n\n self.current_command = line",
"def next_line(self, context, line):",
"def expect_eol(self):\n if self.length != 0:\n raise ParseError('Spurius words after parsing instruction')",
"def next_line() -> str:\n return input().strip()",
"def eol(self):\n return self.pos == len(self.tokens)",
"def readline(self):\n while(True):\n rxcount = self.in_waiting \n if rxcount > 0: \n for pos, i in enumerate(self.buffer):\n # look for the \\n\n if i == 10: \n line=''\n linebuf = self.buffer[:pos]\n self.buffer = self.buffer[pos+1:]\n for c in linebuf:\n line += chr(c)\n return line",
"def lf(self):\n self._write('\\n')",
"def process_next_char(self): \n self.current_position += 1\n if self.current_position >= len(self.code_input):\n '''End of file since the position is equal to or greater than the input's position'''\n self.current_char = '\\0' #EOF\n print('end of line')\n self.current_char = self.code_input[self.current_position]",
"def readline(self):\n line = \"\"\n n_pos = -1\n try:\n while n_pos < 0:\n line += self.next_chunk()\n n_pos = line.find('\\n')\n except StopIteration:\n pass\n\n if n_pos >= 0:\n line, extra = line[:n_pos+1], line[n_pos+1:]\n self.unshift(extra)\n return line",
"def home(self):\n while self.document.characters[self.position-1].character != '\\n':\n self.position -= 1\n if self.position == 0:\n # Got to beginning of file before newline\n break",
"def do_EOF(self, line):\n print()\n models.storage.save()\n return True",
"def end(self):\n while self.position < len(self.document.characters\n ) and self.document.characters[\n self.position].character != '\\n':\n self.position += 1",
"def try_print_newline(self):\n if self.lasttoken is None:\n return\n\n # Anywhere we writecode does not need the new line character\n no_new_line = {lex.Token.CODE, lex.Token.MULTILINE, lex.Token.ONELINE}\n up_scope = {lex.Token.EXPRESSION, lex.Token.PARENEXPRESSION}\n if not self.lasttoken[0] in no_new_line:\n if self.lasttoken[0] in up_scope:\n self.buffer.scope += 1\n self.buffer.scope_line(\"__io.write(u'\\\\n')\")\n if self.lasttoken[0] in up_scope:\n self.buffer.scope -= 1",
"def feed(self, token, test_newline=True):\n if test_newline:\n newlines = token.count(self.newline_char)\n if newlines:\n self.line += newlines\n self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1\n\n self.char_pos += len(token)\n self.column = self.char_pos - self.line_start_pos + 1",
"def readline(self):\n if self.linebuf:\n line = self.linebuf[(-1)]\n del self.linebuf[-1]\n return line\n buildup_line = ''\n while 1:\n line = self.file.readline()\n if line == '':\n line = None\n if self.strip_comments:\n if line:\n pos = line.find('#')\n if pos == -1:\n pass\n else:\n if pos == 0 or line[(pos - 1)] != '\\\\':\n eol = line[(-1)] == '\\n' and '\\n' or ''\n line = line[0:pos] + eol\n if line.strip() == '':\n continue\n else:\n line = line.replace('\\\\#', '#')\n if self.join_lines and buildup_line:\n if line is None:\n self.warn('continuation line immediately precedes end-of-file')\n return buildup_line\n if self.collapse_join:\n line = line.lstrip()\n line = buildup_line + line\n if isinstance(self.current_line, list):\n self.current_line[1] = self.current_line[1] + 1\n else:\n self.current_line = [\n self.current_line,\n self.current_line + 1]\n else:\n if line is None:\n return\n else:\n if isinstance(self.current_line, list):\n self.current_line = self.current_line[1] + 1\n else:\n self.current_line = self.current_line + 1\n if self.lstrip_ws:\n if self.rstrip_ws:\n line = line.strip()\n if self.lstrip_ws:\n line = line.lstrip()\n else:\n if self.rstrip_ws:\n line = line.rstrip()\n if line == '' or line == '\\n':\n if self.skip_blanks:\n continue\n if self.join_lines:\n if line[(-1)] == '\\\\':\n buildup_line = line[:-1]\n continue\n if line[-2:] == '\\\\\\n':\n buildup_line = line[0:-2] + '\\n'\n continue\n return line"
] | [
"0.795126",
"0.7004974",
"0.6905897",
"0.679999",
"0.67539823",
"0.6661748",
"0.6637306",
"0.65803117",
"0.6556912",
"0.6528691",
"0.6528691",
"0.6528691",
"0.6525438",
"0.6516711",
"0.6511128",
"0.6441263",
"0.6417774",
"0.6373478",
"0.63628477",
"0.6303268",
"0.6265118",
"0.6215369",
"0.62129444",
"0.6164259",
"0.6137777",
"0.61047304",
"0.60906476",
"0.60779",
"0.6009902",
"0.6009433"
] | 0.7717989 | 1 |
Returns a list of content types from the models defined in settings. | def _get_seo_content_types(seo_models):
try:
return [ContentType.objects.get_for_model(m).id for m in seo_models]
except Exception: # previously caught DatabaseError
# Return an empty list if this is called too early
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def listFeaturableContentTypes():",
"def get_searchable_content_types():\n record = dict(interface=ICoverSettings, name='searchable_content_types')\n return api.portal.get_registry_record(**record)",
"def content_types(self):\n return self.get(\"content_type\", decode=True).split(\"#\")",
"def get_content_type_configs(self) -> t.Mapping[str, ContentTypeConfig]:",
"def content_types(self):\n return SpaceContentTypesProxy(self._client, self.id)",
"def for_app_models(self, *args, **kwargs):\n content_types = []\n for app_model in args:\n app, model = app_model.split(\".\")\n content_types.append(ContentType.objects.get(app_label=app, \n model=model))\n return self.for_content_types(content_types, **kwargs)",
"def models(self):\n return self._base.classes",
"def get_types(self):\n return self.types",
"def getTypesList():\n return Gw2Spidy._request('types')['results']",
"def ct(mdl_cls):\n return ContentType.objects.get_for_model(mdl_cls)",
"def models(self):\n return self.config.models()",
"def list_supported_models() -> Sequence[str]:\r\n return list(_MODELS)",
"def models(self):\r\n return self.get_field('model')",
"def models(self):\r\n return self.get_field('model')",
"def types(self) -> List[str]:\n return self._types",
"def doc_types(self):\n return self._extract_set('doc_type')",
"def content_type(self):\n return ContentType.objects.get_for_model(self)",
"def getTypes(self):\n return self._doRequest(self.httpClient.getTypes)",
"def models() -> list[str]:\n return list(models_url.keys())",
"def get_post_types(self):\n types = self.session.query(PostType).all()\n return types",
"def get_response_content_types_list(response):\n # type: (AnyResponseType) -> List[Str]\n content_types = []\n known_types = [\"application\", \"audio\", \"font\", \"example\", \"image\", \"message\", \"model\", \"multipart\", \"text\", \"video\"]\n for part in response.headers[\"Content-Type\"].split(\";\"):\n for sub_type in part.strip().split(\",\"):\n if \"=\" not in sub_type and sub_type.split(\"/\")[0] in known_types:\n content_types.append(sub_type)\n return content_types",
"def get_seo_models():\n seo_models = []\n for model_name in getattr(settings, setting_name_seo_models, ()):\n if \".\" in model_name:\n # TODO: Test this block\n app_label, model_name = model_name.split(\".\", 1)\n model = models.get_model(app_label, model_name)\n if model:\n seo_models.append(model)\n else:\n app = models.get_app(model_name)\n if app:\n seo_models.extend(models.get_models(app))\n\n return seo_models",
"def document_types(db: Session = Depends(get_db)):\n return get_document_types(db)",
"def get_supported_models(self):\n # type: () -> list\n return [model for model in self.__MODELS]",
"def get_describable_list(request):\n describables = []\n\n from django.apps import apps\n for entity in apps.get_app_config('descriptor').describable_entities:\n content_type = get_object_or_404(\n ContentType, app_label=entity._meta.app_label, model=entity._meta.model_name)\n\n describables.append({\n 'id': content_type.pk,\n 'value': \"%s.%s\" % (entity._meta.app_label, entity._meta.model_name),\n 'label': str(entity._meta.verbose_name.capitalize())\n })\n\n return HttpResponseRest(request, describables)",
"def get_models(self):\n return [Doc(system_object) for system_object in self._get_documents()]",
"def content_types_xml(self):\n parts = self._get_parts()\n cti = _ContentTypesItem.from_parts(parts)\n return cti.blob",
"def models(self):\n models = []\n for bundle in self.bundles.values():\n models.extend(list(bundle.models.values()))\n\n return models",
"def get_types(self) :\n\n return list(self.types)[1:]",
"def _get_content_type(self):\n if not getattr(self, 'model', None):\n raise NotImplementedError(f\"{self.__class__.__name__} must specify a model class.\")\n return ContentType.objects.get_for_model(self.model)"
] | [
"0.7349977",
"0.71621644",
"0.6720084",
"0.6582432",
"0.65513587",
"0.64740825",
"0.6369539",
"0.634687",
"0.62149596",
"0.6032758",
"0.6010961",
"0.6010262",
"0.59955335",
"0.59955335",
"0.5927626",
"0.5916104",
"0.5896257",
"0.5886604",
"0.58839566",
"0.58676517",
"0.585921",
"0.58436275",
"0.5821181",
"0.58073205",
"0.57634395",
"0.5761921",
"0.5745699",
"0.5719566",
"0.5711739",
"0.5703778"
] | 0.7348473 | 1 |
split a stream into a list of blocks of size block_size | def block_split(stream, block_size=BLOCK_SIZE_IN_BYTES):
# TODO: this could possibly be a generator
return [stream[i:i + BLOCK_SIZE_IN_BYTES]
for i in range(0, len(stream), BLOCK_SIZE_IN_BYTES)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def block_splitter(data, block_size):\n buf = []\n for i, datum in enumerate(data):\n buf.append(datum)\n if len(buf) == block_size:\n yield buf\n buf = []\n\n # If there's anything leftover (a partial block),\n # yield it as well.\n if buf:\n yield buf",
"def split_chunk(list, chunk_size):\n for i in range(0, len(list), chunk_size):\n yield list[i:i + chunk_size]",
"def str_to_block(s, block_size):\n n = len(s)/block_size\n res = []\n for i in range(n):\n sub_s = s[i*block_size:i*block_size+block_size]\n res.append(sub_s)\n return res",
"def chunks(items, size):\n return [items[i:i+size] for i in range(0, len(items), size)]",
"def splitInBlocks (l, n):\n k = len(l) / n\n r = len(l) % n\n\n i = 0\n blocks = []\n while i < len(l):\n if len(blocks)<r:\n blocks.append(l[i:i+k+1])\n i += k+1\n else:\n blocks.append(l[i:i+k])\n i += k\n\n return blocks",
"def split_to_chunks(of_list, chunk_size):\n assert of_list is not None\n\n for i in range(0, len(of_list), chunk_size):\n yield of_list[i:i + chunk_size]",
"def chunks(lst, size):\n for i in range(0, len(lst), size):\n yield lst[i:i + size]",
"def chunk(it, size):\n it = iter(it)\n return iter(lambda: list(islice(it, size)), [])",
"def _split_in_chunks(lst: Sequence[Any], chunksize: int) -> Iterator[Sequence[Any]]:\n for i in range(0, len(lst), chunksize):\n yield lst[i:i + chunksize]",
"def getChunks(inp_list, chunk_size):\n return [inp_list[x:x + chunk_size] for x in range(0, len(inp_list), chunk_size)]",
"def chunks(self, list_to_chunk, size):\n for i in range(0, len(list_to_chunk), size):\n yield list_to_chunk[i:i + size]",
"def split_list(l, sizes):\n chunks = []\n offset = 0\n for size in sizes:\n chunks.append(l[offset:offset + size])\n offset += size\n return chunks",
"def get_chunks(vals, size):\n for i in range(0, len(vals), size):\n yield vals[i:i + size]",
"def divide_list_in_chunks(self, elements, chunk_size):\n if len(elements) == 0:\n yield []\n for i in range(0, len(elements), chunk_size):\n yield elements[i:i + chunk_size]",
"def chunks(lst, chunk_size):\n for i in range(0, len(lst), chunk_size):\n yield lst[i:i + chunk_size]",
"def ghetto_split(list_, chunk_size=100):\n logging.debug(f\"Splitting list of {len(list_)} length, chunk size = {chunk_size}\")\n split_lists = []\n for i in range(0,len(list_),chunk_size):\n split_lists.append(list_[i:i+chunk_size])\n logging.debug(f\"List has been split into {len(split_lists)} lists. Total num of elements in split lists is {sum([len(i) for i in split_lists])}\")\n return split_lists",
"def blockify_chunks(chunks):\n acc = []\n size = 0\n for chunk, chunk_size in chunks:\n assert len(chunk) == CHUNK_SIZE\n assert len(acc) <= BLOCK_SIZE\n if len(acc) == BLOCK_SIZE:\n # Only the last chunk may be short.\n assert size == CHUNK_SIZE * BLOCK_SIZE\n yield acc, size\n acc = []\n size = 0\n acc.append(chunk)\n size += chunk_size\n assert acc\n yield acc, size",
"def get_chunks(lst, n):\n size = int(len(lst) / n)\n output_list = []\n for i in range(0, n):\n sub_list = lst[i*size:i*size + size]\n output_list.append(sub_list)\n if len(lst) % n != 0:\n for i in range((n-1)*size+1, len(lst)):\n output_list[-1].append(lst[i])\n return output_list",
"def get_chunks(size):\n chunk_start = 0\n chunk_size = 0x20000\n\n while chunk_start + chunk_size < size:\n yield (chunk_start, chunk_size)\n chunk_start += chunk_size\n if chunk_size < 0x100000:\n chunk_size += 0x20000\n\n if chunk_start < size:\n yield (chunk_start, size - chunk_start)",
"def chunker(seq: list, size: int) -> list:\n if isinstance(seq, list) == False:\n raise ValueError(\"`seq` must be a list\")\n return list(seq[pos:pos + size] for pos in range(0, len(seq), size))",
"def chunk(list, chunksize):\n for i in range(0, len(list), chunksize):\n yield list[i:i + chunksize]",
"def chunk(seq, size, groupByList=True):\n func = tuple\n if groupByList:\n func = list\n return [func(seq[i:i + size]) for i in range(0, len(seq), size)]",
"def _chunker(self, seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))",
"def _chunk_bs(bs, step=2):\n stop = len(bs)\n start = 0\n bs_to_list = []\n for bstep in range(0, stop, step):\n bs_to_list.insert(bstep, bs[start:bstep+step])\n start = start + step\n return bs_to_list",
"def _split_on_chunks(self, iterable, size):\n return utils.split_on_chunks(iterable, size)",
"def chunkify(list,size):\n for i in range (0, len(list), size):\n yield list[i:i+size]",
"def get_chunks(sequence, ck_size):\n \n list_chunk = []\n i=1\n l = len(sequence)\n if l < 4*ck_size:\n raise ValueError(\"Chunk size should be of 4 at least \")\n for i in range(1, l):\n if i*ck_size < l:\n list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #while(i*ck_size < l):\n #list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #i += 1\n return list_chunk",
"def chunk(flat, sizes):\n iter_flat = iter(flat)\n yield from (list(islice(iter_flat, 0, size)) for size in sizes)",
"def make_chunks(l, n):\n return [l[i:i+n] for i in range(0, len(l), n)]",
"def divide_chunks(a_list, n):\n return [a_list[i:i + n] for i in range(0, len(a_list), n)]"
] | [
"0.7169465",
"0.7118412",
"0.6919498",
"0.68670726",
"0.68363047",
"0.6794331",
"0.67409796",
"0.673424",
"0.67077184",
"0.6663819",
"0.66617143",
"0.6639705",
"0.65981317",
"0.65813637",
"0.6572411",
"0.65618354",
"0.6526337",
"0.65255904",
"0.6516533",
"0.6511457",
"0.65091085",
"0.6506129",
"0.6475647",
"0.6473687",
"0.64590114",
"0.6450244",
"0.6447278",
"0.64270604",
"0.6404486",
"0.6381866"
] | 0.74972177 | 0 |
discover bluetooth le peripherals and their characteristics | def discover_BLE_characteristics(lodBLE):
#logging = tls.console_logger()
cb.set_verbose(True)
cb.reset()
Delg = bleDelegate(lodBLE)
cb.set_central_delegate(Delg)
cb.scan_for_peripherals()
logging.info('Waiting for callbacks state=%s' % (cb.get_state()))
while not Delg.allFound():
time.sleep(1)
logging.info('found %d characteristics' % len(Delg.charFound))
cb.stop_scan()
return Delg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def discover(bt_addr):\n print \"performing inquiry...\"\n nearby_devices = bluetooth.discover_devices(lookup_names = True)\n print \"Found %d devices\" % len(nearby_devices)\n \n for addr, name in neaby_devices:\n print \" %s - %s\" % (addr, name)",
"def scan_bluetooth(self):\n nearby_devices = bluetooth.discover_devices(lookup_names=True)\n print(\"Found {} devices at {}\".format(len(nearby_devices), datetime.now()))\n timestamp = datetime.now().strftime('%m/%d/%Y %H:%M:%S')\n self.capture = self.MonitorCapture(timestamp=timestamp, structure=nearby_devices, ip_addr=self.ip_addr,\n location=self.location)\n for name, addr in nearby_devices:\n print(\" %s - %s\" % (addr, name))\n\n self.capture = json.dumps(self.capture.__dict__)",
"def bt_get_discovered_devices(self):\n discovered_bluetooth_device = []\n try:\n self.bt_radio('on')\n if '8.1' in self.phone_info.os_version:\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.bluetooth_pair_new_device_in_android_8_1_button,\n 10)\n self.find_element(self.driver.appium_driver,\n self.bluetooth_pair_new_device_in_android_8_1_button,\n 2).click()\n time.sleep(10)\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.bluetooth_discovered_device_list,\n 10)\n element_list = self.find_elements(self.driver.appium_driver,\n self.bluetooth_discovered_device_list,\n 1)\n\n # To add connected bluetooth device name in list\n for index in range(len(element_list)):\n discovered_bluetooth_device.append(\n str(element_list[index].text.replace('\\u200e', '')))\n logger.debug(\"List of Discovered Devices:\" + str(\n discovered_bluetooth_device))\n except Exception as e:\n self.take_screenshot(self.driver.appium_driver,\n '__retry_to_bt_connect')\n logger.error(\"No device are discoverable .\")\n logger.error(repr(e))\n return discovered_bluetooth_device",
"def getDevices():\n \n scannedDevices = list()\n \n proc = subprocess.Popen('bluetoothctl scan on', shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=8192, universal_newlines=True)\n \n time.sleep(10)\n \n proc.stdin.write('scan off')\n \n try:\n stdout, stderr = proc.communicate()\n except subprocess.TimeoutExpired:\n proc.kill()\n stdout, stderr = proc.communicate()\n\n ansiEscapePattern = re.compile(r'\\x1B[@-_][0-?]*[ -/]*[@-~]')\n stdout = ansiEscapePattern.sub('', stdout)\n \n #deviceNamePattern = re.compile('^\\[NEW\\] Device [A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2} ')\n \n for line in stdout.split('\\n'):\n if '[NEW] Device' in line:\n device = list()\n device.append(line[13:31])\n device.append(line[31:])\n scannedDevices.append(device)\n \n return scannedDevices",
"def connect_magic():\n nearby_devices = bluetooth.discover_devices(lookup_names = True, duration=5)\n\n for addr, name in nearby_devices:\n print(name)\n if name == \"MindWave Mobile\":\n print \"found\"\n return (connect_bluetooth_addr(addr), addr)\n return (None, \"\")",
"def scan_chip_ble_devices(devCtrl):\n devices = []\n bleMgr = BleManager(devCtrl)\n bleMgr.scan(\"-t 10\")\n\n for device in bleMgr.peripheral_list:\n devIdInfo = bleMgr.get_peripheral_devIdInfo(device)\n if devIdInfo:\n devInfo = devIdInfo.__dict__\n devInfo[\"name\"] = device.Name\n devices.append(devInfo)\n\n return devices",
"def bt_scan():\n print(\"Searching for nearby devices...\")\n explore_devices = []\n if explorepy._bt_interface == 'sdk':\n device_manager = explorepy.exploresdk.ExploreSDK_Create()\n nearby_devices = device_manager.PerformDeviceSearch()\n for bt_device in nearby_devices:\n if \"Explore\" in bt_device.name:\n print(\"Device found: %s - %s\" % (bt_device.name, bt_device.address))\n explore_devices.append((bt_device.name, bt_device.address))\n else:\n import bluetooth\n nearby_devices = bluetooth.discover_devices(lookup_names=True)\n for address, name in nearby_devices:\n if \"Explore\" in name:\n print(\"Device found: %s - %s\" % (name, address))\n explore_devices.append((address, name))\n\n if not nearby_devices:\n print(\"No Devices found\")\n\n return explore_devices",
"async def discover(*args):\n # Since discovery needs to connect to all discovered bluetooth devices, and\n # only rules out devices after a timeout, it can potentially take a long\n # time. If there's already a discovery running, just skip this poll.\n if hass.data[DOMAIN][\"discovery\"].locked():\n return\n\n async with hass.data[DOMAIN][\"discovery\"]:\n bluetooth_devices = await hass.async_add_executor_job(\n pykulersky.discover_bluetooth_devices\n )\n\n # Filter out already connected lights\n new_devices = [\n device\n for device in bluetooth_devices\n if device[\"address\"] not in hass.data[DOMAIN][\"devices\"]\n ]\n\n for device in new_devices:\n light = pykulersky.Light(device[\"address\"], device[\"name\"])\n try:\n # If the connection fails, either this is not a Kuler Sky\n # light, or it's bluetooth connection is currently locked\n # by another device. If the vendor's app is connected to\n # the light when home assistant tries to connect, this\n # connection will fail.\n await hass.async_add_executor_job(check_light, light)\n except pykulersky.PykulerskyException:\n continue\n # The light has successfully connected\n hass.data[DOMAIN][\"devices\"].add(device[\"address\"])\n async_add_entities([KulerskyLight(light)], update_before_add=True)",
"def getPairConDevices():\n \n # Enable bluetooth service if not enabled\n changeBluetoothService(enable=True)\n \n # List available bluetooth devices\n blueDevices = execCommand('bluetoothctl devices')\n \n # parse available devices to list\n availDevices = list()\n for device in blueDevices.split('\\n'):\n if 'Device' in device:\n deviceList = list()\n deviceList.append(device[25:])\n deviceList.append(device[7:24])\n availDevices.append(deviceList)\n \n # check paired and connected devices\n pairedDevices = list()\n connectedDevices = list()\n for device in availDevices:\n deviceInfo = execCommand('bluetoothctl info {}'.format(device[1]))\n if 'Paired: yes' in deviceInfo:\n pairedDevices.append(device)\n if 'Connected: yes' in deviceInfo:\n connectedDevices.append(device)\n \n return pairedDevices, connectedDevices",
"async def async_find_available_devices(hass: HomeAssistant, username: str, password: str):\n\n result = []\n\n devices = await DeviceScanner.async_find_devices(hass)\n\n _LOGGER.debug(\"Found %d AwoX devices\" % (len(devices)))\n\n for mac, dev in devices.items():\n _LOGGER.debug(\"Device %s [%s]\" % (dev['name'], dev['mac']))\n try:\n mylight = DeviceScanner._connect(dev['mac'], username, password)\n if mylight.session_key:\n result.append({\n 'mac': dev['mac'],\n 'name': mylight.getModelNumber()\n })\n mylight.disconnect()\n except:\n _LOGGER.debug('Failed to connect [%s]' % dev['mac'])",
"async def async_step_bluetooth(\n self, discovery_info: BluetoothServiceInfoBleak\n ) -> FlowResult:\n _LOGGER.debug(\"Discovered bluetooth device: %s\", discovery_info)\n await self.async_set_unique_id(format_unique_id(discovery_info.address))\n self._abort_if_unique_id_configured()\n parsed = parse_advertisement_data(\n discovery_info.device, discovery_info.advertisement\n )\n if not parsed or parsed.data.get(\"modelName\") not in SUPPORTED_MODEL_TYPES:\n return self.async_abort(reason=\"not_supported\")\n self._discovered_adv = parsed\n data = parsed.data\n self.context[\"title_placeholders\"] = {\n \"name\": data[\"modelName\"],\n \"address\": discovery_info.address,\n }\n return await self.async_step_user()",
"def discover(timeout=10):\n _LOGGER.info(\"Starting scan for local devices\")\n\n import pygatt\n adapter = pygatt.GATTToolBackend()\n\n lights = []\n try:\n adapter.start(reset_on_start=False)\n for device in adapter.scan(timeout=timeout):\n # Improvements welcome\n if device['name'] and device['name'].startswith('LEDBlue-'):\n _LOGGER.info(\n \"Discovered %s: %s\", device['address'], device['name'])\n lights.append(\n Light(device['address'], device['name'].strip()))\n except pygatt.BLEError as ex:\n raise ZerprocException() from ex\n finally:\n try:\n adapter.stop()\n except pygatt.BLEError as ex:\n raise ZerprocException() from ex\n\n _LOGGER.info(\"Scan complete\")\n return lights",
"def fetch_data(self, **kwargs: Any) -> Dict[str, Any]:\n\n # attach notification handler\n self.scanner = Scanner(iface=int(self.interface.replace(\"hci\", \"\"))).withDelegate(self)\n\n try:\n self.scanner.scan(SCAN_TIMEOUT)\n except BTLEDisconnectError as error:\n logging.error(f\"btle disconnected: {error}\")\n except BTLEManagementError as error:\n logging.error(f\"(temporary) bluetooth connection error: {error}\")\n\n return self.data",
"def _list_muses_bluetoothctl(timeout, verbose=False):\n try:\n import pexpect\n except (ImportError, ModuleNotFoundError):\n msg = ('pexpect is currently required to use bluetoothctl from within '\n 'a jupter notebook environment.')\n raise ModuleNotFoundError(msg)\n\n # Run scan using pexpect as subprocess.run returns immediately in jupyter\n # notebooks\n print('Searching for Muses, this may take up to 10 seconds...')\n scan = pexpect.spawn('bluetoothctl scan on')\n try:\n scan.expect('foooooo', timeout=timeout)\n except pexpect.EOF:\n before_eof = scan.before.decode('utf-8', 'replace')\n msg = f'Unexpected error when scanning: {before_eof}'\n raise ValueError(msg)\n except pexpect.TIMEOUT:\n if verbose:\n print(scan.before.decode('utf-8', 'replace').split('\\r\\n'))\n\n # List devices using bluetoothctl\n list_devices_cmd = ['bluetoothctl', 'devices']\n devices = subprocess.run(\n list_devices_cmd, stdout=subprocess.PIPE).stdout.decode(\n 'utf-8').split('\\n')\n muses = [{\n 'name': re.findall('Muse.*', string=d)[0],\n 'address': re.findall(r'..:..:..:..:..:..', string=d)[0]\n } for d in devices if 'Muse' in d]\n _print_muse_list(muses)\n\n return muses",
"def get_available_devices(self):\n try:\n out = self.get_output(\"devices\")\n except BluetoothctlError, e:\n print(e)\n return None\n else:\n available_devices = []\n for line in out:\n device = self.parse_device_info(line)\n if device:\n available_devices.append(device)\n\n return available_devices",
"def get_devices():\n global managed_objects\n global devices_by_adr\n \n devices_by_adr = {}\n \n r = re.compile(\"\\/org\\/bluez\\/hci\\d*\\/dev\\_(.*)\")\n # e.g., match a string like this:\n # /org/bluez/hci0/dev_58_C9_35_2F_A1_EF\n \n for key, value in managed_objects.items():\n # print(\"key=\", key)\n m = r.match(key)\n if m is not None:\n dev_str = m.group(1) # we have a device string!\n # print(\"dev_str=\", dev_str)\n # let's flatten that dict a bit\n devices_by_adr[dev_str] = value[\"org.bluez.Device1\"]",
"def device_discovery(endless):\r\n click.echo(\"start device discovery ...\")\r\n _device_discovery(endless)",
"def find_connections():\n # print \"External\"\n # print findservices('00:0D:93:19:C8:68')\n # print findservices('bc:f5:ac:84:81:0c')\n # print finddevices()\n # print findservices(gethostaddr())\n # print gethostclass()\n print \"Your address: \", lb.gethostaddr()\n print lb.finddevicename(lb.gethostaddr())\n s = lb.socket()\n #s.bind((\"\", 0)) # RFCOMM port\n #s.bind((\"\", 1)) # RFCOMM port\n s.bind((\"\", 2)) # RFCOMM port\n print \"About to listen\"\n s.listen(1)\n print \"About to advertise\"\n lb.advertise(\"LightBlueService\", s, lb.RFCOMM)\n print \"Advertised at {} and listening on channel {}...\".format(s.getsockname()[0], s.getsockname()[1])\n print \"Waiting to accept\"\n # s.setblocking(1)\n try:\n conn, addr = s.accept()\n except KeyboardInterrupt:\n print \"Closing connection due to keyboard intterupt\"\n s.close()\n raise KeyboardInterrupt\n # Set timeout for 1 second\n # s.settimeout(1.0)\n print \"Connected by\", addr\n return conn, addr, s",
"def getPeripheralNames(self):\n pass",
"def sample(self):\n print(\"sampling bluetooth arduino\")\n self.sock.send(b'B')\n data = b''\n '''while True:\n data += self.sock.recv(1024)\n if data.endswith(b'\\n'):\n break\n '''\n #self.sock.settimeout(2)\n try:\n while True:\n d = self.sock.recv(255)\n data += d\n if d.find(b'\\n') != -1:\n break\n except Exception as err:\n print(err)\n pass\n print(data)\n data = json.loads(data.decode())\n if not any(x == 0 for x in data.values()):\n for label in self.data_labels:\n self.data[label].append(data[label])\n #self.data[\"time\"].append(time.time() - self.start)\n print(data)\n #print(self.data)\n #return self.data",
"def test_gatt_connect_trigger_on_services_discovered_iterate_attributes(\n self):\n gatt_server_cb = self.per_ad.droid.gattServerCreateGattServerCallback()\n gatt_server = self.per_ad.droid.gattServerOpenGattServer(\n gatt_server_cb)\n self.gatt_server_list.append(gatt_server)\n try:\n bluetooth_gatt, gatt_callback, adv_callback = (\n orchestrate_gatt_connection(self.cen_ad, self.per_ad))\n self.bluetooth_gatt_list.append(bluetooth_gatt)\n except GattTestUtilsError as err:\n self.log.error(err)\n return False\n self.adv_instances.append(adv_callback)\n if self.cen_ad.droid.gattClientDiscoverServices(bluetooth_gatt):\n expected_event = gatt_cb_strings['gatt_serv_disc'].format(\n gatt_callback)\n try:\n event = self.cen_ad.ed.pop_event(expected_event,\n self.default_timeout)\n discovered_services_index = event['data']['ServicesIndex']\n except Empty:\n self.log.error(gatt_cb_err['gatt_serv_disc'].format(\n expected_event))\n return False\n log_gatt_server_uuids(self.cen_ad, discovered_services_index)\n return self._orchestrate_gatt_disconnection(bluetooth_gatt,\n gatt_callback)",
"async def get_bluetooth_info(self,\n reset_inactivity_timeout=True,\n response_timeout_in_seconds=None):\n command = _create_get_bluetooth_info_command(sequence_number=self._get_and_increment_command_sequence_number(),\n wait_for_response=True,\n reset_inactivity_timeout=reset_inactivity_timeout)\n\n response_packet = await self._send_command(command,\n response_timeout_in_seconds)\n\n return _parse_bluetooth_info(response_packet.data)",
"def scan():\n debug(\"CBA4.scan()\")\n num = MpOrLibUsb.get_device_count()\n devices = []\n i = 0\n while i < num:\n cba = CBA4(interface=MpOrLibUsb(i))\n i += 1\n sn = cba.get_serial_number()\n if sn:\n devices.append(sn)\n cba.close()\n #end loop\n return devices\n #end scan()",
"async def find_devices() -> List[DeviceInfo]:\n return await Discovery.search_devices()",
"def test_gatt_connect_get_connected_devices(self):\n gatt_server_cb = self.per_ad.droid.gattServerCreateGattServerCallback()\n gatt_server = self.per_ad.droid.gattServerOpenGattServer(\n gatt_server_cb)\n self.gatt_server_list.append(gatt_server)\n try:\n bluetooth_gatt, gatt_callback, adv_callback = (\n orchestrate_gatt_connection(self.cen_ad, self.per_ad))\n self.bluetooth_gatt_list.append(bluetooth_gatt)\n except GattTestUtilsError as err:\n self.log.error(err)\n return False\n conn_cen_devices = self.cen_ad.droid.bluetoothGetConnectedLeDevices(\n bt_profile_constants['gatt'])\n conn_per_devices = self.per_ad.droid.bluetoothGetConnectedLeDevices(\n bt_profile_constants['gatt_server'])\n target_name = self.per_ad.droid.bluetoothGetLocalName()\n error_message = (\"Connected device {} not found in list of connected \"\n \"devices {}\")\n if not any(d['name'] == target_name for d in conn_cen_devices):\n self.log.error(error_message.format(target_name, conn_cen_devices))\n return False\n # For the GATT server only check the size of the list since\n # it may or may not include the device name.\n target_name = self.cen_ad.droid.bluetoothGetLocalName()\n if not conn_per_devices:\n self.log.error(error_message.format(target_name, conn_per_devices))\n return False\n self.adv_instances.append(adv_callback)\n return self._orchestrate_gatt_disconnection(bluetooth_gatt,\n gatt_callback)",
"def broadcast(loopstate):\n cmdstring = 'sudo hcitool -i hci0 cmd ' # Send cmd to hci0\n cmdstring += '0x08 ' # Set group to BLE\n cmdstring += '0x0008 ' # Set command to HCI_LE_Set_Advertising_Data\n cmdstring += '0D ' # Length of entire following data, in bytes\n cmdstring += '02 ' # Length of flag info\n cmdstring += '01 ' # Use AD flags\n cmdstring += '02 ' # Flag value:\n # bit 0 (OFF) LE Limited Discoverable Mode\n # bit 1 (ON) LE General Discoverable Mode\n # bit 2 (OFF) BR/EDR Not Supported\n # bit 3 (ON) Simultaneous LE and BR/EDR to Same Device Capable (controller)\n # bit 4 (ON) Simultaneous LE and BR/EDR to Same Device Capable (Host)\n cmdstring += '09 ' # Length of following message, in bytes\n cmdstring += '07 ' # GAP value (07 = 128 Bit Complete Service UUID List)\n cmdstring += '42 69 63 79 63 6c 65 ' # Header to identify beacon message-\n # - and it's also is Bicycle in ASCII!\n if loopstate:\n cmdstring = cmdstring + LOOP_ON\n else:\n cmdstring = cmdstring + LOOP_OFF + ' >/dev/null 2>&1'\n subprocess.call(cmdstring, shell=True)\n subprocess.call('sudo hciconfig hci0 leadv 3 >/dev/null 2>&1', shell=True)",
"def __initialize_variable_of_bluetooth(self):\n if self.phone_info.phone_type == PhoneType.ANDROID:\n self.package_name = OPPOConstant.bluetooth_settings_app_package\n self.activity_name = OPPOConstant.bluetooth_app_activity\n self.bt_unpair_button= 'self.android_locators.BLUETOOTH_UNPAIR_BUTTON_ByXPATH'\n self.do_not_ask_again_checkbox = 'self.android_locators.DO_NOT_ASK_AGAIN_CHECKBOX_ByXPATH'\n self.bluetooth_connected_indicator = 'self.android_locators.BLUETOOTH_CONNECTED_INDICATOR_ByXPATH'\n self.bluetooth_tab = 'self.android_locators.SETTINGS_BT_TAB_ByXPATH'\n self.bluetooth_button_on_off_button = 'self.android_locators.BLUETOOTH_ON_OFF_ByID'\n self.bluetooth_button = 'self.android_locators.BLUETOOTH_BUTTON_ByXPATH'\n self.bluetooth_on_off_check = 'self.android_locators.BLUETOOTH_ON_OFF_CHECK_ByXPATH'\n self.paired_devices_show_more_less = 'self.android_locators.PAIRED_DEVICES_SHOW_MORE_LESS_ByID'\n self.paired_device_text = 'self.android_locators.PAIRED_DEVICE_TEXT_ByXPATH'\n self.paired_device_list = 'self.android_locators.BLUETOOTH_PAIRED_DEVICE_LIST_ByXPATH'\n self.bluetooth_pair_device = 'self.android_locators.BLUETOOTH_CONNECT_DEVICE_ByXPATH'\n self.bluetooth_pair_new_device_refresh_button = \\\n 'self.android_locators.BLUETOOTH_PAIR_NEW_DEVICE_REFRESH_ByXPATH'\n self.bluetooth_contact_access_allow_button = 'self.android_locators.BLUETOOTH_CONTACT_ACCESS_ALLOW_BUTTON_ByXPATH'\n self.bluetooth_pair_button = 'self.android_locators.BLUETOOTH_PAIR_BUTTON_ByXPATH'\n self.bluetooth_pair_failed_button = 'self.android_locators.BLUETOOTH_PAIR_FAILED_BUTTON_ByXPATH'\n self.bluetooth_device_name_cancel_button = 'self.android_locators.BLUETOOTH_DEVICE_NAME_CANCEL_BUTTON_ByXPATH'\n self.bluetooth_device_scan_bar = 'self.android_locators.BLUETOOTH_DEVICE_SCAN_BAR_ByXPATH'\n self.bluetooth_pair_device_with_scan_bar = 'self.android_locators.BLUETOOTH_CONNECT_DEVICE_WITH_SCAN_BAR_ByXPATH'\n self.bluetooth_pair_device_with_title = 'self.android_locators.BLUETOOTH_CONNECT_DEVICE_WITH_TITLE_ByXPATH'\n self.bluetooth_connection_status = \\\n 'self.android_locators.BLUETOOTH_CONNECTION_STATUS_ByXPATH'\n self.bluetooth_connected_device_list = \\\n 'self.android_locators.BLUETOOTH_CONNECTED_DEVICE_LIST_ByXPATH'\n self.bluetooth_discovered_device_list = \\\n 'self.android_locators.BLUETOOTH_LIST_OF_AVAILABLE_DEVICES_ByXPATH'\n self.bluetooth_settings_button = \\\n 'self.android_locators.BLUETOOTH_CONNECTION_SETTINGS_ByID'\n self.bluetooth_not_connected_device = \\\n 'self.android_locators.BLUETOOTH_CONNECTED_DEVICE_LIST_ByXPATH'\n self.bluetooth_more_options_button = \\\n 'self.android_locators.BLUETOOTH_MORE_OPTIONS_ByXPATH'\n self.device_name = \\\n 'self.android_locators.BLUETOOTH_RENAME_BUTTON_ByXPATH'\n self.device_name_text_box = \\\n 'self.android_locators.BLUETOOTH_EDIT_TEXTBOX_ByID'\n self.set_name_button = \\\n 'self.android_locators.BLUETOOTH_SET_NAME_BUTOON_ByXPATH'\n self.bluetooth_mac_addess = \\\n 'self.android_locators.BLUETOOTH_MAC_ADDRESS_ByXPATH'\n self.general_button_settings = \\\n 'self.android_locators.ABOUT_PHONE_BUTTON_ByXPATH'\n self.status_button = 'self.android_locators.STATUS_BUTTON_ByXPATH'\n self.contact_sharing_button = \\\n 'self.android_locators.BLUETOOTH_ENABLE_CONTACT_SHARING_BUTTON_ByXPATH'\n self.contact_sharing_confirmation_ok_button = \\\n 'self.android_locators.BLUETOOTH_ENABLE_CONTACT_SHARING_CONFIRM_OK_BUTTON_ByXPATH'\n self.device_to_connect_after_swipe = \\\n 'self.android_locators.BLUETOOTH_DEVICE_TO_CONNECT_AFTER_SWIPE_ByID'\n self.pop_up_ok_button = \\\n 
'self.android_locators.BLUETOOTH_POP_UP_OK_BUTTON_ByXPATH'\n self.connected_device_button = \\\n 'self.android_locators.CONNECTED_DEVICES_BUTTON_ByXPATH'\n self.connection_references = \\\n 'self.android_locators.CONNECTION_REFERENCES_BUTTON_ByXPATH'\n self.bluetooth_device_setting_button = \\\n 'self.android_locators.BLUETOOTH_DEVICE_SETTINGS_BUTTON_ByID'\n if (self.phone_info.os_version.startswith('6.')) or '8.0.0' in self.phone_info.os_version:\n # Special override for 6.X phones. Fixes my Nexus 6P/Android\n # 6.0.1 phone. Not sure about any others...\n # TODO: Once we get a way to figure out a phone's model,\n # make the Android 8.0.0 check *not* apply to the Nexus 6P.\n # Looks like the above now is also used on Android 8.0.0. As\n # far as I can tell this breaks the Nexus 6P...\n # But, based on the commit history, I think this was added\n # for the Samsung phones.\n self.bluetooth_device_setting_button = \\\n 'self.android_locators.BLUETOOTH_DEVICE_SETTINGS_BUTTON_6_0_1_ByID'\n self.contact_sharing_checkbox = \\\n 'self.android_locators.BLUETOOTH_ENABLE_CONTACT_SHARING_CHECKBOX_ByXPATH'\n self.media_sharing_switch = \\\n 'self.android_locators.BLUETOOTH_MEDIA_SHARING_SWITCH_ByXPATH'\n self.media_sharing_button = \\\n 'self.android_locators.BLUETOOTH_ENABLE_DISABLE_MEDIA_SHARING_ByXPATH'\n self.bluetooth_pair_new_device_in_android_8_1_button = \\\n 'self.android_locators.BLUETOOTH_ADD_NEW_DEVICE_IN_8_1_ByXPATH'\n self.contact_sharing_button_in_android_8_1_switch = \\\n 'self.android_locators.BLUETOOTH_ENABLE_CONTACT_SHARING_SWITCH_IN_8_1_ByXPATH'\n self.pop_up_ok_button = \\\n 'self.android_locators.BLUETOOTH_POP_UP_OK_BUTTON_ByXPATH'\n self.bluetooth_status_summary = \\\n 'self.android_locators.BLUETOOTH_STATUS_SUMMARY_ByXPATH'\n self.previously_paired_device_button = \\\n 'self.android_locators.BLUETOOTH_PREVIUSLY_CONNECTED_DEVICE_BUTTON_ByXPATH'\n self.bluetooth_more_options = \\\n 'self.android_locators.SAMSUNG_CONTACT_MORE_DETAILS_ByXPATH'\n self.media_volume_text = \\\n 'self.android_locators.BLUETOOTH_MEDIA_VOLUME_SYNC_ByXPATH'\n self.media_volume_sync_switch = \\\n 'self.android_locators.BLUETOOTH_VOLUME_SYNC_SWITCH_ByID'\n\n elif self.phone_info.phone_type == PhoneType.IOS:\n self.package_name = get_ios_settings_bundle_id()\n self.activity_name = get_ios_settings_app_package()\n self.bluetooth_button_on_off_button = \\\n 'self.ios_locators.BLUETOOTH_ON_OFF_BUTTON_ByXPATH'\n self.bluetooth_button = 'self.ios_locators.BLUETOOTH_ByXPATH'\n self.paired_device_list = \\\n 'self.ios_locators.BLUETOOTH_IS_PAIRED_DEVICE_LIST_ByXPATH'\n self.bluetooth_connection_status = \\\n 'self.ios_locators.BLUETOOTH_VERIFY_DEVICE_CONNECTED_ByXPATH'\n self.bluetooth_connected_device_list = \\\n 'self.ios_locators.BLUETOOTH_LIST_OF_CONNECTED_DEVICES_ByXPATH'\n self.bluetooth_discovered_device_list = \\\n 'self.ios_locators.BLUETOOTH_LIST_OF_SCANNED_DEVICES_ByXPATH'\n self.bluetooth_paired_device_list = \\\n 'self.ios_locators.BLUETOOTH_IS_PAIRED_DEVICE_LIST_ByXPATH'\n self.bluetooth_pair_device = \\\n 'self.ios_locators.BLUETOOTH_DEVICES_LIST_ByXPATH'\n self.paired_device_list = \\\n 'self.ios_locators.BLUETOOTH_IS_PAIRED_DEVICE_LIST_ByXPATH'\n self.bluetooth_settings_button = \\\n 'self.ios_locators.BLUETOOTH_MORE_INFO_BUTTON_ByXPATH'\n self.bluetooth_not_connected_device = \\\n 'self.ios_locators.BLUETOOTH_LIST_OF_NOT_CONNECTED_DEVICES_ByXPATH'\n self.general_button_settings = \\\n 'self.ios_locators.GENERAL_BUTTON_ByXPATH'\n self.status_button = 'self.ios_locators.ABOUT_BUTTON_ByXPATH'\n 
self.device_name = 'self.ios_locators.NAME_BUTTON_ByXPATH'\n self.device_name_text_box = \\\n 'self.ios_locators.DEVICE_NAME_TEXTFIELD_ByXPATH'\n self.set_name_button = 'self.ios_locators.DONE_BUTTON_ByXPATH'\n self.bluetooth_mac_addess = \\\n 'self.ios_locators.BLUETOOTH_MAC_ADDRESS_ByXPATH'\n self.device_to_connect_after_swipe = \\\n 'self.ios_locators.BLUETOOTH_DEVICE_TO_CONNECT_AFTER_SWIPE_ByXPATH'\n self.contact_sharing_button = \\\n 'self.ios_locators.CONTACT_SHARING_SWITCH_ByXPATH'\n self.bluetooth_device_setting_button = \\\n 'self.ios_locators.BLUETOOTH_DEVICE_SETTINGS_BUTTON_ByID'\n self.pop_up_ok_button = \\\n 'self.ios_locators.BLUETOOTH_POP_UP_OK_BUTTON_ByXPATH'",
"def ble_device_matches(\n matcher: BluetoothCallbackMatcher | BluetoothMatcher,\n service_info: BluetoothServiceInfoBleak,\n) -> bool:\n device = service_info.device\n if (address := matcher.get(ADDRESS)) is not None and device.address != address:\n return False\n\n if matcher.get(CONNECTABLE, True) and not service_info.connectable:\n return False\n\n advertisement_data = service_info.advertisement\n if (\n service_uuid := matcher.get(SERVICE_UUID)\n ) is not None and service_uuid not in advertisement_data.service_uuids:\n return False\n\n if (\n service_data_uuid := matcher.get(SERVICE_DATA_UUID)\n ) is not None and service_data_uuid not in advertisement_data.service_data:\n return False\n\n if (\n manfacturer_id := matcher.get(MANUFACTURER_ID)\n ) is not None and manfacturer_id not in advertisement_data.manufacturer_data:\n return False\n\n if (manufacturer_data_start := matcher.get(MANUFACTURER_DATA_START)) is not None:\n manufacturer_data_start_bytes = bytearray(manufacturer_data_start)\n if not any(\n manufacturer_data.startswith(manufacturer_data_start_bytes)\n for manufacturer_data in advertisement_data.manufacturer_data.values()\n ):\n return False\n\n if (local_name := matcher.get(LOCAL_NAME)) is not None and (\n (device_name := advertisement_data.local_name or device.name) is None\n or not _memorized_fnmatch(\n device_name,\n local_name,\n )\n ):\n return False\n\n return True",
"def getControllers():\n \n # Check enabled bluetooth service\n changeBluetoothService(enable=True)\n \n #proc = os.popen('bluetoothctl list').read()\n blueListStdout = execCommand('bluetoothctl list')\n \n # Get controller's MAC and name to list\n cntList = list()\n for line in blueListStdout.splitlines():\n lst = line.split()\n del lst[0]\n del lst[-1]\n \n if lst:\n cntList.append(lst)\n \n return cntList",
"def bluetooth_listen(\n addr, threshold, callback, sleep=1, daily=True, debug=False):\n b = BluetoothRSSI(addr=addr)\n while True:\n rssi = b.request_rssi()\n if debug:\n print(\"---\")\n print(\"addr: {}, rssi: {}\".format(addr, rssi))\n # Sleep and then skip to next iteration if device not found\n if rssi is None:\n time.sleep(sleep)\n continue\n # Trigger if RSSI value is within threshold\n if threshold[0] < rssi < threshold[1]:\n callback()\n if daily:\n # Calculate the time remaining until next day\n now = datetime.datetime.now()\n tomorrow = datetime.datetime(\n now.year, now.month, now.day, 0, 0, 0, 0) + \\\n datetime.timedelta(days=1)\n until_tomorrow = (tomorrow - now).seconds\n if debug:\n print(\"Seconds until tomorrow: {}\".format(until_tomorrow))\n else:\n time.sleep(until_tomorrow)\n # Delay between iterations\n time.sleep(sleep)"
] | [
"0.6978275",
"0.67073125",
"0.6545722",
"0.6475751",
"0.6446607",
"0.63599527",
"0.6293589",
"0.6150195",
"0.60710776",
"0.59620595",
"0.58640736",
"0.5854606",
"0.57990634",
"0.5784767",
"0.5665145",
"0.5649242",
"0.5633475",
"0.56149334",
"0.5598885",
"0.5506579",
"0.5474487",
"0.54387915",
"0.5433412",
"0.543088",
"0.5414126",
"0.5400879",
"0.5400508",
"0.5371653",
"0.5354755",
"0.5340079"
] | 0.75675184 | 0 |
Set up the Spotify platform. | def async_setup_spotify(hass, config, configurator):
    return async_setup(hass, config) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def async_setup(hass, config):\n import spotipy.oauth2\n import json\n global AIS_SPOTIFY_TOKEN\n\n try:\n ws_resp = aisCloud.key(\"spotify_oauth\")\n json_ws_resp = ws_resp.json()\n spotify_redirect_url = json_ws_resp[\"SPOTIFY_REDIRECT_URL\"]\n spotify_client_id = json_ws_resp[\"SPOTIFY_CLIENT_ID\"]\n spotify_client_secret = json_ws_resp[\"SPOTIFY_CLIENT_SECRET\"]\n spotify_scope = json_ws_resp[\"SPOTIFY_SCOPE\"]\n try:\n ws_resp = aisCloud.key(\"spotify_token\")\n key = ws_resp.json()[\"key\"]\n AIS_SPOTIFY_TOKEN = json.loads(key)\n except:\n AIS_SPOTIFY_TOKEN = None\n _LOGGER.info(\"No AIS_SPOTIFY_TOKEN\")\n except Exception as e:\n _LOGGER.error(\"No spotify oauth info: \" + str(e))\n return False\n\n cache = hass.config.path(DEFAULT_CACHE_PATH)\n gate_id = ais_global.get_sercure_android_id_dom()\n oauth = spotipy.oauth2.SpotifyOAuth(spotify_client_id, spotify_client_secret, spotify_redirect_url,\n scope=spotify_scope, cache_path=cache, state=gate_id)\n token_info = oauth.get_cached_token()\n if not token_info:\n _LOGGER.info(\"no spotify token in cache;\")\n if AIS_SPOTIFY_TOKEN is not None:\n with open(cache, 'w') as outfile:\n json.dump(AIS_SPOTIFY_TOKEN, outfile)\n token_info = oauth.get_cached_token()\n if not token_info:\n _LOGGER.info(\"no spotify token; run configurator\")\n async_request_configuration(hass, config, oauth)\n return True\n\n if hass.data.get(DOMAIN):\n configurator = hass.components.configurator\n configurator.request_done(hass.data.get(DOMAIN))\n del hass.data[DOMAIN]\n\n # register services\n data = hass.data[DOMAIN] = SpotifyData(hass, oauth)\n\n # service = configured_service(hass)\n\n @asyncio.coroutine\n def search(call):\n _LOGGER.info(\"search \" + str(call))\n yield from data.process_search_async(call)\n\n def select_track_name(call):\n _LOGGER.info(\"select_track_name\")\n data.process_select_track_name(call)\n\n def change_serive(call):\n _LOGGER.info(\"change_serive\")\n data.change_serive(call)\n\n hass.services.async_register(DOMAIN, 'search', search)\n hass.services.async_register(DOMAIN, 'select_track_name', select_track_name)\n hass.services.async_register(DOMAIN, 'change_serive', change_serive)\n\n return True",
"def setup_method(self):\n self.hass = get_test_home_assistant()\n\n self.config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\", \"name\": \"test local\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n self.endpoint_url = f\"https://westus.{mf.FACE_API_URL}\"",
"def setup():\n require('hosts', 'project_path', provided_by=envs.ENVS)\n\n if not exists(env.project_path):\n abort(red('Project path ({project_path}) does not exist. '\n 'Create it on the server before continuing.'.format(**env)))\n\n with cd(env.project_path):\n run('mkdir -p api renderer conf markup_renderer')\n run('mkdir -p api/static api/uploads')\n\n make_release_folders('api')\n make_release_folders('renderer')",
"def setUp(self):\n self._plugin = spotlight_volume.SpotlightVolumePlugin()\n self._parser = plist.PlistParser()",
"def configure(self):\n\n self.platform.configure()",
"async def async_setup(hass, config):\n conf = config[DOMAIN]\n\n username = conf[CONF_USERNAME]\n password = conf[CONF_PASSWORD]\n accounts = conf.get(CONF_ACCOUNTS)\n\n @callback\n def websocket_handle_playlists(hass, connection, msg):\n \"\"\"Handle get playlist\"\"\"\n import spotipy\n access_token, expires = get_spotify_token(username=username, password=password)\n client = spotipy.Spotify(auth=access_token)\n resp = client._get('views/made-for-x?content_limit=10&locale=en&platform=web&types=album%2Cplaylist%2Cartist%2Cshow%2Cstation', limit=10,\n offset=0)\n connection.send_message(\n websocket_api.result_message(msg[\"id\"], resp)\n )\n\n def get_spotify_token(username, password):\n import spotify_token as st\n data = st.start_session(username, password)\n access_token = data[0]\n # token_expires = data[1]\n expires = data[1] - int(time.time())\n return access_token, expires\n\n def play(client, spotify_device_id, uri, random_song, repeat):\n # import spotipy\n # import http.client as http_client\n # spotipy.trace = True\n # spotipy.trace_out = True\n # http_client.HTTPConnection.debuglevel = 1\n\n _LOGGER.debug('Version: %s, playing URI: %s on device-id: %s', _VERSION, uri, spotify_device_id)\n if uri.find('track') > 0:\n _LOGGER.debug('Playing track using uris= for uri: %s', uri)\n client.start_playback(device_id=spotify_device_id, uris=[uri])\n else:\n if uri == 'random':\n _LOGGER.debug('Cool, you found the easter egg with playing a random playlist')\n playlists = client.user_playlists('me', 50)\n no_playlists = len(playlists['items'])\n uri = playlists['items'][random.randint(0, no_playlists - 1)]['uri']\n kwargs = {'device_id': spotify_device_id, 'context_uri': uri}\n if random_song:\n results = client.user_playlist_tracks(\"me\", uri)\n position = random.randint(0, results['total'] - 1)\n _LOGGER.debug('Start playback at random position: %s', position)\n kwargs['offset'] = {'position': position}\n\n _LOGGER.debug('Playing context uri using context_uri for uri: \"%s\" (random_song: %s)', uri, random_song)\n client.start_playback(**kwargs)\n if repeat:\n _LOGGER.debug('Turning repeat on')\n time.sleep(5)\n client.repeat(state=repeat, device_id=spotify_device_id)\n\n def get_account_credentials(call):\n \"\"\" Get credentials for account \"\"\"\n account = call.data.get(CONF_SPOTIFY_ACCOUNT)\n user = username\n pwd = password\n if account is not None:\n _LOGGER.debug('setting up with different account than default %s', account)\n user = accounts.get(account).get(CONF_USERNAME)\n pwd = accounts.get(account).get(CONF_PASSWORD)\n return user, pwd\n\n def shouldTransferPlayback(call, client):\n \"\"\" Check if something is playing \"\"\"\n uri = call.data.get(CONF_SPOTIFY_URI)\n if uri is None or uri.strip() == '' or call.data.get(CONF_TRANSFER_PLAYBACK):\n current_playback = client.current_playback()\n if current_playback is not None:\n _LOGGER.debug('current_playback from spotipy: %s', current_playback)\n return True\n return False\n\n async def start_casting(call):\n \"\"\"service called.\"\"\"\n import spotipy\n\n uri = call.data.get(CONF_SPOTIFY_URI)\n random_song = call.data.get(CONF_RANDOM, False)\n repeat = call.data.get(CONF_REPEAT)\n\n # Account\n user, pwd = get_account_credentials(call)\n\n # login as real browser to get powerful token\n access_token, expires = get_spotify_token(username=user, password=pwd)\n\n # get the spotify web api client\n client = spotipy.Spotify(auth=access_token)\n\n # launch the app on chromecast\n spotify_cast_device = SpotifyCastDevice(hass, 
call.data.get(CONF_DEVICE_NAME), call.data.get(CONF_ENTITY_ID))\n spotify_cast_device.startSpotifyController(access_token, expires)\n spotify_device_id = spotify_cast_device.getSpotifyDeviceId(client)\n\n transfer_playback = shouldTransferPlayback(call, client)\n if transfer_playback == True:\n _LOGGER.debug('Transfering playback')\n client.transfer_playback(\n device_id=spotify_device_id, force_play=True)\n else:\n play(client, spotify_device_id, uri, random_song, repeat)\n\n # Register websocket and service\n hass.components.websocket_api.async_register_command(\n WS_TYPE_SPOTCAST_PLAYLISTS, websocket_handle_playlists, SCHEMA_PLAYLISTS\n )\n\n hass.services.async_register(DOMAIN, 'start', start_casting,\n schema=SERVICE_START_COMMAND_SCHEMA)\n\n return True",
"async def async_setup_entry(hass, config_entry):\n # setup the Spotify\n if AIS_SPOTIFY_TOKEN is None:\n # remove configurator\n # configurator = hass.components.configurator\n # req_config = _CONFIGURING.pop(OAUTH_CLIENT_ID)\n # configurator.request_done(req_config)\n\n await async_setup(hass, hass.config)\n return True",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n token = load_token(hass)\n \n if not token:\n request_app_setup(hass, config, add_devices, discovery_info)\n else:\n continue_setup_platform(hass, config, token, add_devices, discovery_info)",
"def setup(self) -> None:\n pass",
"def setup(self) -> None:\n pass",
"def setup(self) -> None:\n pass",
"def _setup(self):\n pass",
"def _setup(self):\n pass",
"def _setup(self):\n pass",
"def _setup(self):\n pass",
"def _setup(self):\n pass",
"def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n\n host = config[CONF_HOST]\n port = config[CONF_PORT]\n token = config.get(CONF_ACCESS_TOKEN)\n\n client = ClementineRemote(host, port, token, reconnect=True)\n\n add_entities([ClementineDevice(client, config[CONF_NAME])])"
] | [
"0.6380721",
"0.6271231",
"0.61243886",
"0.6054209",
"0.6025928",
"0.5995937",
"0.59487516",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.5921665",
"0.5888226",
"0.5888226",
"0.5888226",
"0.5878937",
"0.5878937",
"0.5878937",
"0.5878937",
"0.5878937",
"0.5878752",
"0.58730245"
] | 0.63484156 | 1 |
Set up the Spotify platform. | def async_setup(hass, config):
    import spotipy.oauth2
    import json
    global AIS_SPOTIFY_TOKEN
    try:
        ws_resp = aisCloud.key("spotify_oauth")
        json_ws_resp = ws_resp.json()
        spotify_redirect_url = json_ws_resp["SPOTIFY_REDIRECT_URL"]
        spotify_client_id = json_ws_resp["SPOTIFY_CLIENT_ID"]
        spotify_client_secret = json_ws_resp["SPOTIFY_CLIENT_SECRET"]
        spotify_scope = json_ws_resp["SPOTIFY_SCOPE"]
        try:
            ws_resp = aisCloud.key("spotify_token")
            key = ws_resp.json()["key"]
            AIS_SPOTIFY_TOKEN = json.loads(key)
        except:
            AIS_SPOTIFY_TOKEN = None
            _LOGGER.info("No AIS_SPOTIFY_TOKEN")
    except Exception as e:
        _LOGGER.error("No spotify oauth info: " + str(e))
        return False
    cache = hass.config.path(DEFAULT_CACHE_PATH)
    gate_id = ais_global.get_sercure_android_id_dom()
    oauth = spotipy.oauth2.SpotifyOAuth(spotify_client_id, spotify_client_secret, spotify_redirect_url,
                                        scope=spotify_scope, cache_path=cache, state=gate_id)
    token_info = oauth.get_cached_token()
    if not token_info:
        _LOGGER.info("no spotify token in cache;")
        if AIS_SPOTIFY_TOKEN is not None:
            with open(cache, 'w') as outfile:
                json.dump(AIS_SPOTIFY_TOKEN, outfile)
            token_info = oauth.get_cached_token()
    if not token_info:
        _LOGGER.info("no spotify token; run configurator")
        async_request_configuration(hass, config, oauth)
        return True
    if hass.data.get(DOMAIN):
        configurator = hass.components.configurator
        configurator.request_done(hass.data.get(DOMAIN))
        del hass.data[DOMAIN]
    # register services
    data = hass.data[DOMAIN] = SpotifyData(hass, oauth)
    # service = configured_service(hass)
    @asyncio.coroutine
    def search(call):
        _LOGGER.info("search " + str(call))
        yield from data.process_search_async(call)
    def select_track_name(call):
        _LOGGER.info("select_track_name")
        data.process_select_track_name(call)
    def change_serive(call):
        _LOGGER.info("change_serive")
        data.change_serive(call)
    hass.services.async_register(DOMAIN, 'search', search)
    hass.services.async_register(DOMAIN, 'select_track_name', select_track_name)
    hass.services.async_register(DOMAIN, 'change_serive', change_serive)
    return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def async_setup_spotify(hass, config, configurator):\n return async_setup(hass, config)",
"def setup_method(self):\n self.hass = get_test_home_assistant()\n\n self.config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\", \"name\": \"test local\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n self.endpoint_url = f\"https://westus.{mf.FACE_API_URL}\"",
"def setup():\n require('hosts', 'project_path', provided_by=envs.ENVS)\n\n if not exists(env.project_path):\n abort(red('Project path ({project_path}) does not exist. '\n 'Create it on the server before continuing.'.format(**env)))\n\n with cd(env.project_path):\n run('mkdir -p api renderer conf markup_renderer')\n run('mkdir -p api/static api/uploads')\n\n make_release_folders('api')\n make_release_folders('renderer')",
"def setUp(self):\n self._plugin = spotlight_volume.SpotlightVolumePlugin()\n self._parser = plist.PlistParser()",
"def configure(self):\n\n self.platform.configure()",
"async def async_setup(hass, config):\n conf = config[DOMAIN]\n\n username = conf[CONF_USERNAME]\n password = conf[CONF_PASSWORD]\n accounts = conf.get(CONF_ACCOUNTS)\n\n @callback\n def websocket_handle_playlists(hass, connection, msg):\n \"\"\"Handle get playlist\"\"\"\n import spotipy\n access_token, expires = get_spotify_token(username=username, password=password)\n client = spotipy.Spotify(auth=access_token)\n resp = client._get('views/made-for-x?content_limit=10&locale=en&platform=web&types=album%2Cplaylist%2Cartist%2Cshow%2Cstation', limit=10,\n offset=0)\n connection.send_message(\n websocket_api.result_message(msg[\"id\"], resp)\n )\n\n def get_spotify_token(username, password):\n import spotify_token as st\n data = st.start_session(username, password)\n access_token = data[0]\n # token_expires = data[1]\n expires = data[1] - int(time.time())\n return access_token, expires\n\n def play(client, spotify_device_id, uri, random_song, repeat):\n # import spotipy\n # import http.client as http_client\n # spotipy.trace = True\n # spotipy.trace_out = True\n # http_client.HTTPConnection.debuglevel = 1\n\n _LOGGER.debug('Version: %s, playing URI: %s on device-id: %s', _VERSION, uri, spotify_device_id)\n if uri.find('track') > 0:\n _LOGGER.debug('Playing track using uris= for uri: %s', uri)\n client.start_playback(device_id=spotify_device_id, uris=[uri])\n else:\n if uri == 'random':\n _LOGGER.debug('Cool, you found the easter egg with playing a random playlist')\n playlists = client.user_playlists('me', 50)\n no_playlists = len(playlists['items'])\n uri = playlists['items'][random.randint(0, no_playlists - 1)]['uri']\n kwargs = {'device_id': spotify_device_id, 'context_uri': uri}\n if random_song:\n results = client.user_playlist_tracks(\"me\", uri)\n position = random.randint(0, results['total'] - 1)\n _LOGGER.debug('Start playback at random position: %s', position)\n kwargs['offset'] = {'position': position}\n\n _LOGGER.debug('Playing context uri using context_uri for uri: \"%s\" (random_song: %s)', uri, random_song)\n client.start_playback(**kwargs)\n if repeat:\n _LOGGER.debug('Turning repeat on')\n time.sleep(5)\n client.repeat(state=repeat, device_id=spotify_device_id)\n\n def get_account_credentials(call):\n \"\"\" Get credentials for account \"\"\"\n account = call.data.get(CONF_SPOTIFY_ACCOUNT)\n user = username\n pwd = password\n if account is not None:\n _LOGGER.debug('setting up with different account than default %s', account)\n user = accounts.get(account).get(CONF_USERNAME)\n pwd = accounts.get(account).get(CONF_PASSWORD)\n return user, pwd\n\n def shouldTransferPlayback(call, client):\n \"\"\" Check if something is playing \"\"\"\n uri = call.data.get(CONF_SPOTIFY_URI)\n if uri is None or uri.strip() == '' or call.data.get(CONF_TRANSFER_PLAYBACK):\n current_playback = client.current_playback()\n if current_playback is not None:\n _LOGGER.debug('current_playback from spotipy: %s', current_playback)\n return True\n return False\n\n async def start_casting(call):\n \"\"\"service called.\"\"\"\n import spotipy\n\n uri = call.data.get(CONF_SPOTIFY_URI)\n random_song = call.data.get(CONF_RANDOM, False)\n repeat = call.data.get(CONF_REPEAT)\n\n # Account\n user, pwd = get_account_credentials(call)\n\n # login as real browser to get powerful token\n access_token, expires = get_spotify_token(username=user, password=pwd)\n\n # get the spotify web api client\n client = spotipy.Spotify(auth=access_token)\n\n # launch the app on chromecast\n spotify_cast_device = SpotifyCastDevice(hass, 
call.data.get(CONF_DEVICE_NAME), call.data.get(CONF_ENTITY_ID))\n spotify_cast_device.startSpotifyController(access_token, expires)\n spotify_device_id = spotify_cast_device.getSpotifyDeviceId(client)\n\n transfer_playback = shouldTransferPlayback(call, client)\n if transfer_playback == True:\n _LOGGER.debug('Transfering playback')\n client.transfer_playback(\n device_id=spotify_device_id, force_play=True)\n else:\n play(client, spotify_device_id, uri, random_song, repeat)\n\n # Register websocket and service\n hass.components.websocket_api.async_register_command(\n WS_TYPE_SPOTCAST_PLAYLISTS, websocket_handle_playlists, SCHEMA_PLAYLISTS\n )\n\n hass.services.async_register(DOMAIN, 'start', start_casting,\n schema=SERVICE_START_COMMAND_SCHEMA)\n\n return True",
"async def async_setup_entry(hass, config_entry):\n # setup the Spotify\n if AIS_SPOTIFY_TOKEN is None:\n # remove configurator\n # configurator = hass.components.configurator\n # req_config = _CONFIGURING.pop(OAUTH_CLIENT_ID)\n # configurator.request_done(req_config)\n\n await async_setup(hass, hass.config)\n return True",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n token = load_token(hass)\n \n if not token:\n request_app_setup(hass, config, add_devices, discovery_info)\n else:\n continue_setup_platform(hass, config, token, add_devices, discovery_info)",
"def setup(self) -> None:\n pass",
"def setup(self) -> None:\n pass",
"def setup(self) -> None:\n pass",
"def _setup(self):\n pass",
"def _setup(self):\n pass",
"def _setup(self):\n pass",
"def _setup(self):\n pass",
"def _setup(self):\n pass",
"def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n\n host = config[CONF_HOST]\n port = config[CONF_PORT]\n token = config.get(CONF_ACCESS_TOKEN)\n\n client = ClementineRemote(host, port, token, reconnect=True)\n\n add_entities([ClementineDevice(client, config[CONF_NAME])])"
] | [
"0.63484156",
"0.6271231",
"0.61243886",
"0.6054209",
"0.6025928",
"0.5995937",
"0.59487516",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.59220606",
"0.5921665",
"0.5888226",
"0.5888226",
"0.5888226",
"0.5878937",
"0.5878937",
"0.5878937",
"0.5878937",
"0.5878937",
"0.5878752",
"0.58730245"
] | 0.6380721 | 0 |
asks for destination base | def destination_base_input(destination_base):
    if 2 <= destination_base <= 9:
        return destination_base
    else:
        print("invalid input") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getDest(): #status: Done, Tested\r\n pass",
"def _GuessBase(self, required):\r\n url = self._GetInfo(\"URL\")\r\n if url:\r\n scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)\r\n guess = \"\"\r\n # TODO(anatoli) - repository specific hacks should be handled by server\r\n if netloc == \"svn.python.org\" and scheme == \"svn+ssh\":\r\n path = \"projects\" + path\r\n scheme = \"http\"\r\n guess = \"Python \"\r\n elif netloc.endswith(\".googlecode.com\"):\r\n scheme = \"http\"\r\n guess = \"Google Code \"\r\n path = path + \"/\"\r\n base = urlparse.urlunparse((scheme, netloc, path, params,\r\n query, fragment))\r\n logging.info(\"Guessed %sbase = %s\", guess, base)\r\n return base\r\n if required:\r\n ErrorExit(\"Can't find URL in output from svn info\")\r\n return None",
"def base_dir(self, value):\n pass",
"def _get_URL_base(self, request, step):\n index = request.path.find(step.slug)\n\n return request.path[:index]",
"def destination(self) -> pulumi.Input['DestinationArgs']:\n return pulumi.get(self, \"destination\")",
"def base_dir(self):\n pass",
"def destination(self) -> str:\n return pulumi.get(self, \"destination\")",
"def base_path(self):\n return self.setup.base_path",
"def _GuessBase(self, required):\n info = RunShell([\"svn\", \"info\"])\n for line in info.splitlines():\n words = line.split()\n if len(words) == 2 and words[0] == \"URL:\":\n url = words[1]\n scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)\n username, netloc = urllib.splituser(netloc)\n if username:\n logging.info(\"Removed username from base URL\")\n if netloc.endswith(\"svn.python.org\"):\n if netloc == \"svn.python.org\":\n if path.startswith(\"/projects/\"):\n path = path[9:]\n elif netloc != \"[email protected]\":\n ErrorExit(\"Unrecognized Python URL: %s\" % url)\n base = \"http://svn.python.org/view/*checkout*%s/\" % path\n logging.info(\"Guessed Python base = %s\", base)\n elif netloc.endswith(\"svn.collab.net\"):\n if path.startswith(\"/repos/\"):\n path = path[6:]\n base = \"http://svn.collab.net/viewvc/*checkout*%s/\" % path\n logging.info(\"Guessed CollabNet base = %s\", base)\n elif netloc.endswith(\".googlecode.com\"):\n path = path + \"/\"\n base = urlparse.urlunparse((\"http\", netloc, path, params,\n query, fragment))\n logging.info(\"Guessed Google Code base = %s\", base)\n else:\n path = path + \"/\"\n base = urlparse.urlunparse((scheme, netloc, path, params,\n query, fragment))\n logging.info(\"Guessed base = %s\", base)\n return base\n if required:\n ErrorExit(\"Can't find URL in output from svn info\")\n return None",
"def get_base_path(self) -> str:\n raise NotImplementedError()",
"def get_short_url_base():",
"def relative_path(base, target):\r\n common, base_tail, target_tail = split_common(base, target)\r\n #print \"common:\", common\r\n #print \"base_tail:\", base_tail\r\n #print \"target_tail:\", target_tail\r\n r = len(base_tail) * [os.pardir] + target_tail\r\n if r:\r\n return os.path.join(*r)\r\n else:\r\n return os.curdir",
"def relative_base(base):\n return as_base(base).lstrip('/')",
"def Destination(self) -> _n_0_t_1:",
"def _base_folder(self, base_folder, obj):\n # Large portions of this code came from Products.ATContentTypes\n # TODO: a package to deal with this kind of stuff (string to object?)\n # sanitize a bit: you never know, with all those win users out there\n relPath = base_folder.replace('\\\\', '/')\n if not relPath:\n return self._portal\n if relPath[0] == '/':\n # someone didn't enter a relative path.\n # let's go with it\n path = relPath.split('/')[1:]\n else:\n folders = relPath.split('/')\n\n # set the path to the object path\n path = self._relPathToPortal(aq_parent(obj))\n\n # now construct an aboslute path based on the relative custom path\n # eat away from 'path' whenever we encounter a '..'\n # in the relative path apend all other elements other than ..\n for folder in folders:\n if folder == '..':\n # chop off one level from path\n if path == []:\n # can't chop off more\n # just return this path and leave the loop\n break\n else:\n path = path[:-1]\n elif folder == '.':\n # don't really need this but for being complete\n # strictly speaking some user may use a . aswell\n pass # do nothing\n else:\n path.append(folder)\n\n if not (path == []):\n # As we will traverse from portal, there is no need to\n # have its path in the way\n path = '/'.join(path)\n try:\n baseFolder = self._portal.unrestrictedTraverse(path)\n except (AttributeError, KeyError):\n baseFolder = None\n else:\n baseFolder = self._portal\n return baseFolder",
"def base_path(self):\n return self._base_path",
"def getDestination(source):\n\ti = len(source)-1\n\tif source[i] == '/':\n\t\tsource = source[0:i - 1]\n\twhile i >= 0:\n\t\tif source[i] == '/':\n\t\t\tbreak\n\t\ti -= 1\n\tdestination = source[0: i]\n\treturn destination",
"def test_gen_destination_for_alias_is_destination(self):\n destination = db.gen_destination_for_alias(self.dbm, \"reddit\")\n self.assertIsInstance(destination, db.Destination)\n self.assertEqual(\"https://www.reddit.com/r/{}\", destination.url)",
"def do_destination(self, args):\n self.destination = int(args)",
"def _get_basedir(datadir, target_genome):\n genome_dir = os.path.join(datadir, \"genomes\")\n for dirname in glob.glob(os.path.join(genome_dir, \"*\", \"*\")):\n if dirname.endswith(\"/%s\" % target_genome):\n return dirname",
"def base():\n print(CFG.base.path)",
"def get_destination(self):\n\n return self.destination",
"def test_gen_default_fallback_is_destination(self):\n # Start transaction and add default fallback to database\n # NOTE: Must begin a nested transaction, as autocommit=False by default\n # which automatically starts a transaction when Session is created.\n # See: https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.SessionTransaction\n self.session.begin_nested()\n self._insert_default_fallback()\n\n destination = db.gen_default_fallback(self.dbm)\n self.assertEqual(db.Destination, type(destination))\n self.assertEqual(\"https://duckduckgo.com?q={}\", destination.url)\n\n self.session.rollback()",
"def get_target_base_dir(self):\n return self.target_base_dir",
"def get_destination(self):\n return self._destination",
"def _adjust_destination(self, destProps):\n super(PHEME_http_receiverTransferAgent, self).\\\n _adjust_destination(destProps)\n for prop in destProps.iter(tag='property'):\n if prop.attrib['name'] == 'host':\n prop.text = self.options.input_dir",
"def _get_mount_point_base(self):\n raise NotImplementedError('_get_mount_point_base')",
"def _get_base_url(self):\n return '/{}/'.format(self.name.replace('__', '/'))",
"def _select_destination(self):\n # Ideally this should do something clever based on the start location\n # ie known trips. But for now, it will pick randomly!\n station_dict = self.network.station_dict\n\n stations = list(station_dict.keys())\n #stations = [x for x in stations if isinstance(x, int) or x.startswith(\"801\")]\n #stations = [x for x in stations if isinstance(x, int) or x.startswith(\"80139\")]\n weights = [station_dict[x].in_popularity for x in stations]\n\n # pick using the given weight distributions\n self.dest = random.choices(stations, weights=weights)[0]\n\n return",
"def set_basedir(self, host, path):"
] | [
"0.6239136",
"0.6081792",
"0.60812646",
"0.58890224",
"0.58708084",
"0.58682895",
"0.5796034",
"0.57916695",
"0.57642895",
"0.5762684",
"0.57504994",
"0.5716583",
"0.5704606",
"0.5647763",
"0.5647447",
"0.56273687",
"0.56166524",
"0.5603385",
"0.5594626",
"0.5593369",
"0.55924225",
"0.5568949",
"0.55611664",
"0.5521025",
"0.55054355",
"0.5500462",
"0.5492395",
"0.5472809",
"0.5467185",
"0.5458602"
] | 0.65736556 | 0 |
asks user for input decimal number and base to which to convert | def main(destination_base, max_number, decimal_number):
    if 2 <= destination_base <= 9:
        if 0 <= decimal_number <= max_number:
            converted_number = base_conversion(destination_base, decimal_number)
            print(f"the converted number is: {converted_number}")
        else:
            print("invalid input for base 10 number")
    else:
        print("invalid input for destination base") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decimal_number_input(decimal_number, destination_base, max_number):\n if 0 <= decimal_number <= int(max_number):\n print(f\"the converted number is: {base_conversion(destination_base, decimal_number)}\")\n else:\n print(\"invalid input\")\n return decimal_number",
"def base_converter(decimal_number, base):\n digits = \"0123456789ABCDEF\"\n quotient_stack = Stack()\n reminder = decimal_number\n while reminder > 0:\n quotient = reminder % base\n quotient_stack.push(quotient)\n reminder = reminder // base\n\n new_string = \"\"\n while not quotient_stack.is_empty():\n new_string = new_string + digits[quotient_stack.pop()]\n return new_string",
"def decimal_to_base(n, base):\n\n chars = \"0123456789ABCDEF\"\n stack = []\n is_negative = False\n\n if n < 0:\n n = abs(n)\n is_negative = True\n\n while n > 0:\n remainder = n % base\n stack.append(remainder)\n n = n // base\n\n result = \"\"\n\n while stack:\n result = result + chars[stack.pop()]\n\n if is_negative:\n return \"-\"+result\n else:\n return result",
"def base_conversion(destination_base, decimal_number):\n remainder_4 = decimal_number % destination_base\n remainder_3 = (decimal_number // destination_base) % destination_base\n remainder_2 = (decimal_number // destination_base // destination_base) % destination_base\n remainder_1 = (decimal_number // destination_base // destination_base // destination_base) % destination_base\n\n converted_number = str(remainder_1)+str(remainder_2)+str(remainder_3)+str(remainder_4)\n return converted_number",
"def main():\r\n num = enter_num()\r\n if num is not None:\r\n num_lst = mk_num_lst(num)\r\n dec = convert(num_lst)\r\n print(\"decimal value of BASE 36 number\", num, \"is\", dec)\r\n else:\r\n print(\"user terminated program\")",
"def to_base_ten(self, value, base):\r\n numeral = self.numeral\r\n\r\n if not 2 <= base <= 36:\r\n raise ValueError('Base must be between 2 and 36')\r\n \r\n x = str(value)\r\n if '.' in x: #If value has a fractional part\r\n int_result = 0\r\n frac_result = 0\r\n int_part, frac_part = x.split('.')[0], x.split('.')[1] #split the value at '.' to two parts and return tuple\r\n int_result += int(int_part, base) #performing addition to confirm result is an integer\r\n \r\n for i in range(1, len(frac_part)+ 1): #exponent for fractional part starts from -1 to -n\r\n try:\r\n frac_result += (int(frac_part[i-1]) / pow(base, i)) #If no alphabeth in fractional part\r\n except Exception:\r\n frac_result += (int(numeral.index(frac_part[i-1])) / pow(base, i)) #Else look up value in numeral\r\n \r\n return int_result + frac_result #The retured value is an integer\r\n\r\n else: #If value is a whole number\r\n return int(str(value), base)",
"def baseconvert(num, base):\n\n digits = \"0123456789abcdefghijklmnopqrstuvwxyz\"\n\n try:\n num = int(num)\n base = int(base)\n except ValueError:\n return \"\"\n\n if num < 0 or base < 2 or base > 36:\n return \"\"\n\n num_string = \"\"\n while 1:\n remainder = num % base\n num_string = digits[remainder] + num_string\n num = num / base\n if num == 0:\n break\n\n return num_string",
"def to_baseN(self, value, base, other_base = False):\r\n numeral = self.numeral\r\n \r\n if other_base: #If value is not in base 10\r\n conv_to_x = self.to_base_ten(value, other_base) #Use the above function to first convert to base 10.\r\n return self.to_baseN(conv_to_x, base) # Recursively convert from base 10 to the new base.\r\n\r\n else: # Since value supplied to this part is in decimal, we can work in base 10\r\n int_part = int(value) #Remove fractional part\r\n frac_part = value - int_part #Keep fractional part\r\n\r\n if value == 0:\r\n return \"0\"\r\n\r\n if int_part < 0:\r\n return '-' + self.to_baseN(abs(int_part), base, other_base) #for number < 0, work with its absolute form before adding -\r\n\r\n if not 2 <= base <= len(numeral):\r\n raise ValueError(f'Base must be between 2 and {len(numeral)}')\r\n \r\n int_result = \"-\" if int_part < 0 else \"\" #add - to negatiive numbers\r\n frac_result = \"\"\r\n\r\n while int_part != 0:\r\n int_result += numeral[int_part % base]\r\n int_part //= base\r\n\r\n while frac_part != 0:\r\n frac_result += numeral[int(frac_part * base)]\r\n frac_part = (frac_part * base) - int(frac_part * base)\r\n result = (int_result[::-1] + \".\" + frac_result[::1]) if frac_result else int_result[::-1]\r\n \r\n if result.startswith('.'):\r\n return \"0\" + result\r\n else:\r\n return result",
"def int_to_base(num, base):\n if num<=0: return '0' \n digits = []\n return ''.join(digits)",
"def int_to_base(num, base):\n if base<=0: return '0' \n digits = []\n if (num <0):\n \tcur= -num\n else: cur = num\n while(cur>0):\n\t\tdigits.append(str(cur%base))\n\t\tcur/=base\n if (num <0): digits.append('-')\n digits.reverse()\n\n \n \n return ''.join(digits)",
"def base_to_int(string, base):\n if string==\"0\" or base <= 0 : return 0 \n result = 0 \n return result",
"def convertbase(number, base=10):\n\n integer = number\n if not integer:\n return '0'\n sign = 1 if integer > 0 else -1\n alphanum = string.digits + string.ascii_lowercase\n nums = alphanum[:base]\n res = ''\n integer *= sign\n while integer:\n integer, mod = divmod(integer, base)\n res += nums[mod]\n return ('' if sign == 1 else '-') + res[::-1]",
"def fromDecToBase(number, base, digits):\n newNumber = \"\"\n while number > 0:\n rest = number % base\n newNumber = str(rest) + newNumber\n number -= rest\n number /= base\n \n return newNumber.zfill(digits)",
"def dec2base(n, base):\n convertstring = \"0123456789ABCDEF\"\n if n < base:\n return convertstring[n]\n else:\n return dec2base(n // base, base) + convertstring[n % base]",
"def int2base(x, base):\n digs = string.digits + string.ascii_lowercase\n if x < 0:\n sign = -1\n elif x == 0:\n return '0'\n else:\n sign = 1\n x *= sign\n digits = []\n while x:\n digits.append(digs[x % base])\n x //= base\n if sign < 0:\n digits.append('-')\n digits.reverse()\n return ''.join(digits)",
"def convert_base(num, to_base):\n\n\tdigits = '0123456789ABCDEF'\n\tresult = ''\n\n\tif num < to_base:\n\t\treturn digits[num]\n\telse:\n\t\tresult += convert_base(num/to_base, to_base) + str(digits[num % to_base])\n\n\treturn result",
"def standard_form(num: Decimal):\n num = Decimal(num)\n\n if num >= 1:\n power = 0\n while num > 1:\n power += 1\n num /= 10\n\n else:\n power = 0\n while num < 1:\n power -= 1\n num *= 10\n if num > 1:\n break\n power += 1\n\n return num, power",
"def convert(self, base):\n number = self.number\n converted_number = []\n while number:\n rest = number % base\n converted_number.append(rest)\n number = number / base\n return converted_number[::-1]",
"def baseConverter(number, base):\n\n digits = \"0123456789ABCDEF\"\n\n remainders = Stack()\n\n while number > 0:\n rem = number % base\n remainders.push(rem)\n number = number // base\n\n result = \"\"\n\n while not remainders.isEmpty():\n popped = remainders.pop()\n digit = digits[popped]\n result += str(digit)\n return result",
"def base_to_int(string, base):\n if string==\"0\" or base <= 0 : return 0 \n result = 0 \n flip = False;\n if string[0]=='-':\n \tflip=True;\n \tstring = string[1:]\n pow = len(string)-1\n for letr in string:\n \tletrNum = int(letr)\n \tresult+= letrNum*(base**pow)\n \tpow-=1\n if flip:\n \tresult= -result\n return result",
"def base(num,conv,rem=0,baseResult=[]):\r\n if num==0:\r\n strResult=''\r\n for i in baseResult[::-1]:\r\n strResult+=str(i)\r\n return int(strResult)\r\n else:\r\n baseResult.append(num%conv)\r\n return base(num//conv,conv,num%conv,baseResult)",
"def ftb_impl(numstr, from_base='10', to_base='16'):\n ENONALNUM = list(numstr + ' has a non alpha-numeric character')\n EFBDEC = list(from_base + ' is not decimal')\n ETBDEC = list(to_base + ' is not decimal')\n ENOTINFB = list(numstr + ' is not in base ' + from_base)\n E2TO36 = list('supported bases are >= 2 and <= 36')\n MAXBASE = 36\n MINBASE = 2\n numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G',\n 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n try:\n # handle numstr sign\n numstrsign = 0\n if numstr[0] == '+':\n numstrsign = 1\n elif numstr[0] == '-':\n numstrsign = -1\n\n if numstrsign in (1, -1):\n numstr = numstr[1:]\n # end of handle numstr sign\n\n if from_base[0] == '+':\n from_base = from_base[1:]\n elif from_base[0] == '-':\n return E2TO36\n for char in from_base:\n if not str.isdigit(char):\n return EFBDEC\n from_base = int(from_base)\n\n for char in numstr:\n if not (str.isalnum(char) and char != '.'):\n return ENONALNUM\n if int(char, MAXBASE) >= from_base:\n return ENOTINFB\n\n if to_base[0] == '+':\n to_base = to_base[1:]\n elif to_base[0] == '-':\n return E2TO36\n for char in to_base:\n if not str.isdigit(char):\n return ETBDEC\n to_base = int(to_base)\n\n if from_base < MINBASE or from_base > MAXBASE \\\n or to_base < MINBASE or to_base > MAXBASE:\n return E2TO36\n\n numdec = int(numstr, from_base)\n\n result = []\n while numdec:\n result = [numdec % to_base] + result\n numdec = numdec // to_base\n\n for i in range(len(result)):\n char_idx = result[i]\n result[i] = numbers[result[i]]\n\n if numstrsign != 0:\n result = [str(numstrsign)] + result\n return result\n except UnicodeEncodeError as err:\n return list(str(err))",
"def base10_to_base10(value, old_base, new_base, power = 1, verbose = 0):\n if verbose > 1:\n print(\"UnitConversion.base10_to_base10()\") \n \n if old_base is None:\n warnings.warn(\"UnitConversion.base10_to_base10(): argument old_base is None.\")\n return None\n\n if new_base is None:\n warnings.warn(\"UnitConversion.base10_to_base10(): argument new_base is None.\")\n return None\n \n if type(power) != int:\n warnings.warn(\"UnitConversion.base10_to_base10(): argument power is not an integer. Continuing.\")\n \n if type(old_base) == str:\n old_base = prefix_names_to_base10(old_base)\n if type(new_base) == str:\n new_base = prefix_names_to_base10(new_base) \n \n old_base = old_base * power\n new_base = new_base * power\n \n return value * 10**old_base / 10**new_base",
"def destination_base_input(destination_base):\n if 2 <= destination_base <= 9:\n return destination_base\n else:\n print(\"invalid input\")",
"def temp_converter():\n degreeC = input(\"What degree in C do you want to convert to F? \")\n degreeF = int(degreeC) * 9 / 5 + 32\n print(\"\\nRobbie says:\\n\")\n print(\"I converted %s C in to %s F!\" % (degreeC, degreeF))",
"def get_user_input():\n return float(input('Your transaction amount please: '))",
"def number_format(num):\n while True:\n try:\n user_input = float(input(num))\n return user_input\n except ValueError:\n print(\"Error. Please enter the desired number. You may use \"\n \"decimals.\")\n except:\n print(\"Error: unknown.\")",
"def convertebasenbase10(baseorig, numero):\n base10 = 0\n for i in range(len(numero)-1, -1, -1):\n base10 += DIGITOS.index(numero[i]) * baseorig**(len(numero)-i-1)\n\n return base10",
"def to_base_10(number, base):\n\n number_in_base_10 = 0\n\n exp = len(number) - 1\n for digit in number:\n number_in_base_10 += get_number_from_character(digit) * pow(base, exp)\n exp -= 1\n\n return number_in_base_10",
"def dec_to_bin(num, base):\n\n # Divide number by base and keep track of remainder in a stack.\n # What is one of the key indicators that a stack should be used?\n # Reversability\n # The reversal property signals that a stack is likely the appropriate\n # data structure for solving the problem.\n\n result = \"\"\n remstack = Stack()\n digits = \"0123456789ABCDEF\"\n\n while num > 0:\n rem = num % base\n num = num // base\n remstack.push(rem)\n\n while not remstack.is_empty():\n result += digits[remstack.pop()]\n\n return result"
] | [
"0.76400197",
"0.69913197",
"0.67633325",
"0.67291844",
"0.6524661",
"0.65105337",
"0.6500028",
"0.6495628",
"0.64876056",
"0.6460031",
"0.6343496",
"0.63026404",
"0.6264527",
"0.6256234",
"0.623991",
"0.6216938",
"0.6164334",
"0.6153163",
"0.6125251",
"0.6108441",
"0.61037177",
"0.6062151",
"0.6009637",
"0.5975087",
"0.59602225",
"0.595439",
"0.5902032",
"0.5873838",
"0.5862863",
"0.5848287"
] | 0.7455561 | 1 |
helper function; decodes a section of the netloc from punycode. | def decode_punycode(label):
    try:
        return idna.decode(label.encode('ascii'))
    except UnicodeError:
        pass
    except ValueError as exc:
        # see https://github.com/john-kurkowski/tldextract/issues/122
        # if "narrow Python build" in exc.args[0]:
        #     warnings.warn("can not decode punycode: %s" % exc.args[0], UnicodeWarning, stacklevel=2)
        pass
        # return label
        # raise
    return label | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decode(data): #@NoSelf",
"def decode(data):\n raise NotImplementedError",
"def decode(self, code):\n raise NotImplementedError",
"def decode(self, shortUrl):\n return self.demap[shortUrl]",
"def decode(self, s):",
"def decode(self, s):",
"def decode_network_string(msgtype, plen, buf):\n return buf[header.size:plen - 1]",
"def _decode(self, rel_codes, anchors):\n pass",
"def decode(self, z):\n raise NotImplementedError",
"def decode(self, encoded):",
"def decode(self, shortUrl):\n return self.decode_map[shortUrl]",
"def decode(self, shortUrl):\n return self.decode_map[shortUrl]",
"def decode(self,buf):\n eth = dpkt.ethernet.Ethernet(buf)\n pkt_len = len(buf)\n if(eth.type== dpkt.ethernet.ETH_TYPE_IP):\n ip = eth.data\n dst_ip = socket.inet_ntoa(ip.dst)\n src_ip = socket.inet_ntoa(ip.src)\n octet_list = string.split(dst_ip,'.')\n broadcast = False\n for o in octet_list:\n if (o == \"255\"):\n broadcast = True\n break\n if((octet_list[0] == \"224\") or (octet_list[0] == \"239\")):\n broadcast = True #Its multicast actually.\n if not broadcast:\n if(ip.p == dpkt.ip.IP_PROTO_TCP):\n pass\n elif(ip.p == dpkt.ip.IP_PROTO_UDP):\n udp =ip.data\n if((udp.dport == 53) or (udp.sport == 53)): # A request. \n if(udp.dport == 53): # A request. \n return self.dns_handler.handle_dns_request(ip.src,ip.dst,ip.p,udp.sport,udp.dport,udp.data)\n if(udp.sport == 53): # A DNS response\n self.dns_handler.handle_dns_response(ip.src,ip.dst,ip.p,udp.sport,udp.dport,udp.data)\n else:\n pass",
"def decode_line(encoded):\n\n encoded_len = len(encoded)\n index = 0\n array = []\n lat = 0\n lng = 0\n\n while index < encoded_len:\n b = 0\n shift = 0\n result = 0\n\n while True:\n b = ord(encoded[index]) - 63\n index = index + 1\n result |= (b & 0x1f) << shift\n shift += 5\n if b < 0x20:\n break\n\n dlat = ~(result >> 1) if result & 1 else result >> 1\n lat += dlat\n\n shift = 0\n result = 0\n\n while True:\n b = ord(encoded[index]) - 63\n index = index + 1\n result |= (b & 0x1f) << shift\n shift += 5\n if b < 0x20:\n break\n\n dlng = ~(result >> 1) if result & 1 else result >> 1\n lng += dlng\n\n array.append((lat * 1e-5, lng * 1e-5))\n\n return array",
"def _decode_35701(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29995:\n start_byte += n_bytes\n n_bytes = 4\n n_points = struct.unpack(\n '<I', data[start_byte:start_byte + n_bytes])[0]\n return {'n_points': n_points}",
"def decode_addr(self, addr):\n self._check_pid_wrap()\n # Find the binary that contains the specified address.\n # For .so files, look at the relative address; for the main\n # executable, look at the absolute address.\n for binary, (start, end) in self.code_ranges.items():\n if addr >= start and addr <= end:\n offset = addr - start \\\n if binary.endswith(\".so\") else addr\n return \"%s [%s]\" % (self._decode_sym(binary, offset),\n binary)\n return \"%x\" % addr",
"def decode(self, shortUrl):\n pass",
"def decodepkt(self, pkt):\n res = \"\"\n if pkt.startswith('$'):\n try:\n self.logger.debug('unpack< %s', pkt) \n res = self.unpack(pkt)\n except ValueError as ex:\n self.logger.debug('GDB-< %s', res)\n self.logger.warning('Bad packet %s', ex) \n self.s.send(b'-')\n else:\n self.s.send(b'+')\n self.logger.debug('GDB+< %s', res) \n return res\n else:\n self.logger.warning('discards %s', pkt)",
"def decode(cls, data: bytes):\n\n coordinate, = struct.unpack(Protocol.Formats.COORDINATE_FORMAT, data)\n column = coordinate % (2 ** Protocol.Formats.COORDINATE_DELIMITER)\n row = coordinate >> Protocol.Formats.COORDINATE_DELIMITER\n return cls(row=row, column=column)",
"def decode(self, eth):\n\t\tif eth.type == dpkt.ethernet.ETH_TYPE_ARP:\n\t\t\t# print 'arp'\n\t\t\treturn ARP(eth.data).get()\n\n\t\telif eth.type == dpkt.ethernet.ETH_TYPE_IP6:\n\t\t\tip = eth.data\n\t\t\tif ip.p == dpkt.ip.IP_PROTO_UDP:\n\t\t\t\tudp = ip.data\n\n\t\t\t\t# multicast is just like IPv4\n\t\t\t\tif udp.dport == 5353:\n\t\t\t\t\t# print udp\n\t\t\t\t\tans = mDNS(udp).get()\n\t\t\t\t\t# print 25*'='\n\t\t\t\t\t# pp.pprint(ans)\n\t\t\t\t\t# print 25*'='\n\t\t\t\t\treturn ans\n\n\t\t\t\t# print 'IPv6 UDP','port:',udp.dport,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\n\t\t\t# TCP not useful\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_TCP:\n\t\t\t\tpass\n\t\t\t\t# tcp = ip.data\n\t\t\t\t# print 'IPv6 TCP','port:',tcp.dport,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\n\t\t\t# ICMP error msg not useful for mapping\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_ICMP6:\n\t\t\t\t# print 'IPv6 icmp6:',ip.data.data\n\t\t\t\tpass\n\n\t\t\t# other stuff I haven't decoded\n\t\t\telse:\n\t\t\t\tpass\n\t\t\t\t# print 'IPv6',ip.p,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\t\telif eth.type == dpkt.ethernet.ETH_TYPE_IP:\n\t\t\tip = eth.data\n\n\t\t\t# roku interface port: 1900 dst: 239.255.255.250 1900\n\t\t\tif ip.p == dpkt.ip.IP_PROTO_UDP:\n\t\t\t\tudp = ip.data\n\n\t\t\t\t# these aren't useful\n\t\t\t\tif udp.dport == 53: # DNS\n\t\t\t\t\t# return DNS(udp.data)\n\t\t\t\t\treturn {}\n\n\t\t\t\telif udp.dport == 5353: # mDNS\n\t\t\t\t\t# print 'mDNS'\n\t\t\t\t\t# print udp\n\t\t\t\t\treturn mDNS(udp).get()\n\n\t\t\t\telif self.getip(ip.dst) == '239.255.255.250':\n\t\t\t\t\treturn {}\n\n\t\t\t\telse:\n\t\t\t\t\t# don't print standard ports\n\t\t\t\t\t# 17500 dropbox\n\t\t\t\t\t# if not ip.data.dport in [17500]:\n\t\t\t\t\t# \tprint 'other udp','port:',udp.dport,'src:',self.getip(ip.src),'dst:',self.getip(ip.dst),': '\n\t\t\t\t\treturn {}\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_TCP:\n\t\t\t\t# src = self.getip(ip.src)\n\t\t\t\t# if netaddr.IPAddress(src) not in netaddr.IPNetwork(\"192.168.1.0/24\"):\n\t\t\t\t# \twho = ''\n\t\t\t\t# \tif src not in self.ipMap:\n\t\t\t\t# \t\twho = WhoIs(src).record['NetName']\n\t\t\t\t# \t\tself.ipMap[src] = who\n\t\t\t\t# \telse:\n\t\t\t\t# \t\twho = self.ipMap[src]\n\t\t\t\t# \tif who in ['GOOGLE','AKAMAI','APPLE-WWNET','AMAZO-ZIAD1','DROPBOX']:\n\t\t\t\t# \t\treturn {}\n\t\t\t\t# \telse:\n\t\t\t\t# \t\tprint src,who\n\t\t\t\t# don't print standard ports\n\t\t\t\t# port 58969 - XSANS Apple, why do i see that?\n\t\t\t\t# 22 ssh\n\t\t\t\t# 25 smtp\n\t\t\t\t# 80 http\n\t\t\t\t# 123 time server\n\t\t\t\t# 143 imap\n\t\t\t\t# 443 https\n\t\t\t\t# 445 smb\n\t\t\t\t# 548 afp over tcp\n\t\t\t\t# 5009 airport admin utility\n\t\t\t\t# 5222 ichat\n\t\t\t\t# 17500 dropbox\n\t\t\t\t# if not ip.data.dport in [22,25,80,123,143,443,445,548,5009,5222,17500]:\n\t\t\t\t\t# print 'other tcp','port:',ip.data.dport,'src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\t\treturn {}\n\t\t\t# elif ip.p == dpkt.ip.IP_PROTO_ICMP6:\n\t\t\t# \tprint '?????? other icmp6','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\telif ip.p == 2:\n\t\t\t\tpass\n\t\t\t\t# print 'IGMP','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\telse:\n\t\t\t\t# print 'other ip packet','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\t\treturn {}",
"def decodeAddress(self, value: long, useMemorySegmentation: bool) -> ghidra.program.model.address.Address:\n ...",
"def decode_network_number(ptype, plen, buf):\n return number.unpack_from(buf, header.size)[0]",
"def _dinamic_decode(self):\n raise NotImplementedError",
"def decode(decode_format):\n return output_from_decode",
"def decode(self, shortUrl):\n shortUrl = shortUrl[-6:]\n if shortUrl in self.short_to_long:\n return self.short_to_long[shortUrl]",
"def decode_raw(data):\n return RawWire().decode(data)",
"def decode(encoded):\n #six degrees of precision in valhalla\n inv = 1.0 / 1e6;\n \n decoded = []\n previous = [0,0]\n i = 0\n #for each byte\n while i < len(encoded):\n #for each coord (lat, lon)\n ll = [0,0]\n for j in [0, 1]:\n shift = 0\n byte = 0x20\n #keep decoding bytes until you have this coord\n while byte >= 0x20:\n byte = ord(encoded[i]) - 63\n i += 1\n ll[j] |= (byte & 0x1f) << shift\n shift += 5\n #get the final value adding the previous offset and remember it for the next\n ll[j] = previous[j] + (~(ll[j] >> 1) if ll[j] & 1 else (ll[j] >> 1))\n previous[j] = ll[j]\n #scale by the precision and chop off long coords also flip the positions so\n #its the far more standard lon,lat instead of lat,lon\n decoded.append([float('%.6f' % (ll[1] * inv)), float('%.6f' % (ll[0] * inv))])\n #hand back the list of coordinates\n return decoded",
"def decode(hrp, addr):\n hrpgot, data = bech32_decode(addr)\n if hrpgot != hrp:\n return (None, None)\n decoded = convertbits(data[1:], 5, 8, False)\n if decoded is None or len(decoded) < 2 or len(decoded) > 40:\n return (None, None)\n if data[0] > 16:\n return (None, None)\n if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32:\n return (None, None)\n return (data[0], decoded)",
"def decode(self, value):\r\n pass",
"def _decode_data(self, data):\r\n return data.decode('ISO-8859-1')"
] | [
"0.6572104",
"0.61086315",
"0.59683406",
"0.5781539",
"0.5771769",
"0.5771769",
"0.57637405",
"0.57140577",
"0.5689829",
"0.5652979",
"0.5637785",
"0.5637785",
"0.56200993",
"0.5609995",
"0.55943227",
"0.5592023",
"0.55431294",
"0.5532244",
"0.54975724",
"0.54969454",
"0.5494089",
"0.54514223",
"0.54370457",
"0.54345477",
"0.543151",
"0.54081243",
"0.5372327",
"0.5370585",
"0.5345529",
"0.53283983"
] | 0.6290453 | 1 |
Resize colour images to the required scales and augment if required. We create the color_aug object in advance and apply the same augmentation to all images in this item. This ensures that all images input to the pose network receive the same augmentation. | def preprocess(self, inputs, color_aug):
for k in list(inputs):
if "color" in k:
n, im, i = k
inputs[(n, im, 0)] = self.resize(inputs[(n, im, - 1)])
for k in list(inputs):
if "color" in k:
f = inputs[k]
n, im, i = k
inputs[(n, im, i)] = self.to_tensor(f)
if i == 0:
inputs[(n + "_aug", im, i)] = self.to_tensor(color_aug(f)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def img_recolor(self, args, input_image_path):\n \n ec = encoder.Encoder(output_path=args.intermediate_representation, method=args.method,\n size=args.size, p=args.p, grid_size=args.grid_size, plot=args.plot, quantize=args.quantize)\n dc = decoder.Decoder(output_path=args.output_path, method=args.method, size=args.size, p=args.p, gpu_id=args.gpu_id, plot=args.plot)\n\n ec.encode(input_image_path)\n img_gray_name = ar_utils.gen_new_gray_filename(input_image_path)\n img_gray_path = os.path.join(args.intermediate_representation, img_gray_name)\n dc.decode(img_gray_path)\n\n if args.delete_gray and os.path.exists(img_gray_path):\n os.remove(img_gray_path)",
"def image_augmentations(\n image,\n data_augmentations,\n model_input_image_size,\n label=None):\n if image.get_shape() == None:\n im_size = model_input_image_size\n else:\n im_size = image.get_shape().as_list()\n im_size_check = True # np.any(\n # np.less_equal(\n # model_input_image_size[:2],\n # im_size[:2]))\n if data_augmentations is not None:\n for aug in data_augmentations:\n # Pixel/image-level augmentations\n if aug == 'image_float32':\n image = tf.cast(image, tf.float32)\n if aug == 'label_float32':\n label = tf.cast(label, tf.float32)\n if aug == 'bfloat16':\n image = tf.cast(image, tf.bfloat16)\n if aug == 'singleton':\n image = tf.expand_dims(image, axis=-1)\n print 'Adding singleton dimension to image.'\n if aug == 'sgl_label' or aug == 'singleton_label':\n label = tf.expand_dims(label, axis=-1)\n print 'Adding singleton dimension to label.'\n if aug == 'coco_labels':\n label = tf.nn.relu(label - 91)\n if aug == 'contrastive_loss':\n label = tf.stack(\n [tf.ones_like(label), tf.zeros_like(label)], -1)\n if aug == 'bsds_normalize':\n data = np.load(\n '/media/data_cifs/image_datasets/BSDS500/images/train/file_paths.npz')\n mean = data['mean'].squeeze(0)\n stds = data['stds'].squeeze(0)\n image = (image - mean) / stds\n if aug == 'bsds_crop' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n scale_choices = tf.convert_to_tensor(\n # [1. / 2., 1.1 / 2., 1.2 / 2.])\n [1., 1, 1.1, 1.2])\n samples = tf.multinomial(\n tf.log([tf.ones_like(scale_choices)]), 1)\n image_shape = image.get_shape().as_list()\n scale = scale_choices[tf.cast(samples[0][0], tf.int32)]\n scale_tf = tf.cast(\n tf.round(\n np.asarray(\n image_shape[:2]).astype(\n np.float32) * scale),\n tf.int32)\n combined = tf.concat([image, label], axis=-1)\n combo_shape = combined.get_shape().as_list()\n combined_resize = tf.squeeze(\n tf.image.resize_nearest_neighbor(\n tf.expand_dims(combined, axis=0),\n scale_tf,\n align_corners=True),\n axis=0)\n combined_crop = tf.random_crop(\n combined_resize,\n tf.concat(\n [model_input_image_size[:2], [combo_shape[-1]]], 0))\n image = combined_crop[:, :, :image_shape[-1]]\n label = combined_crop[:, :, image_shape[-1]:]\n image.set_shape(model_input_image_size)\n label.set_shape(\n model_input_image_size[:2] + [\n combo_shape[-1] - model_input_image_size[-1]])\n print 'Applying BSDS crop.'\n if aug == 'hed_resize' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n scale_choices = tf.convert_to_tensor(\n # [1. 
/ 2., 1.1 / 2., 1.2 / 2.])\n np.arange(1, 1.51, 0.1)) # 0.7, 1.5\n samples = tf.multinomial(\n tf.log([tf.ones_like(scale_choices)]), 1)\n image_shape = image.get_shape().as_list()\n scale = scale_choices[tf.cast(samples[0][0], tf.int32)]\n scale_tf = tf.cast(\n tf.round(\n np.asarray(\n image_shape[:2]).astype(\n np.float32) * scale),\n tf.int32)\n combined = tf.concat([image, label], axis=-1)\n combo_shape = combined.get_shape().as_list()\n combined_resize = tf.squeeze(\n tf.image.resize_bilinear(\n tf.expand_dims(combined, axis=0),\n scale_tf,\n align_corners=True),\n axis=0)\n print 'Applying HED resize.'\n if aug == 'uint8_rescale':\n image = tf.cast(image, tf.float32) / 255.\n print 'Applying uint8 rescale to the image.'\n if aug == 'cube_plus_rescale':\n image = tf.cast(image, tf.float32) / 13273.\n print 'Applying uint8 rescale to the image.'\n if aug == 'uint8_rescale_label':\n label = tf.cast(label, tf.float32) / 255.\n print 'Applying uint8 rescale to the label.'\n if aug == 'uint8_rescale_-1_1':\n image = 2 * (tf.cast(image, tf.float32) / 255.) - 1\n print 'Applying uint8 rescale.'\n if aug == 'image_to_bgr':\n image = tf.stack(\n [image[..., 2], image[..., 1], image[..., 0]], axis=-1)\n if aug == 'pascal_normalize':\n image = image - [123.68, 116.78, 103.94]\n if aug == 'ilsvrc12_normalize':\n MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]\n STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]\n image = (image - MEAN_RGB) / STDDEV_RGB\n if aug == 'random_contrast':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n print 'Applying random contrast.'\n if aug == 'random_brightness':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image = tf.image.random_brightness(image, max_delta=63.)\n print 'Applying random brightness.'\n if aug == 'grayscale' and im_size_check:\n # image = tf.image.rgb_to_grayscale(image)\n if len(image.get_shape().as_list()) == 2:\n image = tf.expand_dims(image, axis=-1)\n else:\n image = tf.expand_dims(image[..., 0], axis=-1)\n print 'Converting to grayscale.'\n if aug == 'rgb2gray' and im_size_check:\n image = tf.image.rgb_to_grayscale(image)\n print 'Converting rgb2gray.'\n if aug == 'clip_uint8' and im_size_check:\n image = tf.minimum(image, 255.)\n image = tf.maximum(image, 0.)\n if aug == 'cube_plus_crop':\n image = cube_plus_crop(image, model_input_image_size)\n # Affine augmentations\n if aug == 'rotate' and im_size_check:\n max_theta = 22.\n angle_rad = (max_theta / 180.) 
* math.pi\n angles = tf.random_uniform([], -angle_rad, angle_rad)\n transform = tf.contrib.image.angles_to_projective_transforms(\n angles,\n im_size[0],\n im_size[1])\n image = tf.contrib.image.transform(\n image,\n tf.contrib.image.compose_transforms(transform),\n interpolation='BILINEAR') # or 'NEAREST'\n print 'Applying random rotate.'\n if aug == 'rotate90' and im_size_check:\n image = tf.image.rot90(\n image,\n tf.random_uniform(\n shape=[],\n minval=0,\n maxval=4,\n dtype=tf.int32))\n print 'Applying random 90 degree rotate.'\n if aug == 'rotate90_image_label' and im_size_check:\n concat = tf.image.rot90(\n tf.concat([image, label], -1),\n tf.random_uniform(\n shape=[],\n minval=0,\n maxval=4,\n dtype=tf.int32))\n image = concat[..., :im_size[-1]]\n label = concat[..., im_size[-1]:]\n print 'Applying random 90 degree rotate to images and labels.'\n if aug == 'stack3d':\n image = tf.concat([image, image, image], axis=-1)\n if aug == 'rot_image_label' and im_size_check:\n max_theta = 30.\n angle_rad = (max_theta / 180.) * math.pi\n angles = tf.random_uniform([], -angle_rad, angle_rad)\n transform = tf.contrib.image.angles_to_projective_transforms(\n angles,\n im_size[0],\n im_size[1])\n image = tf.contrib.image.transform(\n image,\n tf.contrib.image.compose_transforms(transform),\n interpolation='BILINEAR') # or 'NEAREST'\n label = tf.contrib.image.transform(\n label,\n tf.contrib.image.compose_transforms(transform),\n interpolation='BILINEAR') # or 'NEAREST'\n print 'Applying random rotate.'\n if aug == 'random_scale_crop_image_label'\\\n and im_size_check:\n scale_choices = tf.convert_to_tensor(\n [1., 1.04, 1.08, 1.12, 1.16])\n samples = tf.multinomial(\n tf.log([tf.ones_like(scale_choices)]), 1)\n image_shape = image.get_shape().as_list()\n scale = scale_choices[tf.cast(samples[0][0], tf.int32)]\n scale_tf = tf.cast(\n tf.round(\n np.asarray(\n model_input_image_size[:2]).astype(\n np.float32) * scale),\n tf.int32)\n combined = tf.concat([image, label], axis=-1)\n combo_shape = combined.get_shape().as_list()\n combined_resize = tf.squeeze(\n tf.image.resize_bicubic(\n tf.expand_dims(combined, axis=0),\n scale_tf,\n align_corners=True),\n axis=0)\n combined_crop = tf.random_crop(\n combined_resize, tf.concat(\n [model_input_image_size[:2], [combo_shape[-1]]], 0))\n image = combined_crop[:, :, :image_shape[-1]]\n label = combined_crop[:, :, image_shape[-1]:]\n image.set_shape(model_input_image_size)\n label.set_shape(\n model_input_image_size[:2] + [\n combo_shape[-1] - model_input_image_size[-1]])\n if aug == 'rc_res' and im_size_check:\n image = random_crop(image, model_input_image_size)\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n ms = [x // 2 for x in model_input_image_size]\n image = resize_image_label(\n im=image,\n model_input_image_size=ms,\n f='bicubic')\n print 'Applying random crop and resize.'\n if aug == 'cc_res' and im_size_check:\n image = center_crop(image, model_input_image_size)\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n ms = [x // 2 for x in model_input_image_size]\n image = resize_image_label(\n im=image,\n model_input_image_size=ms,\n f='bicubic')\n print 'Applying center crop and resize.'\n if aug == 'random_crop' and im_size_check:\n image = random_crop(image, model_input_image_size)\n print 'Applying random crop.'\n if aug == 'center_crop' and im_size_check:\n image = center_crop(image, model_input_image_size)\n print 'Applying center crop.'\n if aug == 
'rc_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = crop_image_label(\n image=image,\n label=label,\n size=model_input_image_size,\n crop='random')\n if aug == 'cc_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = crop_image_label(\n image=image,\n label=label,\n size=model_input_image_size,\n crop='center')\n if aug == 'resize' and im_size_check:\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='bicubic')\n print 'Applying area resize.'\n if aug == 'jk_resize' and im_size_check:\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = tf.image.resize_image_with_crop_or_pad(\n image,\n model_input_image_size[0],\n model_input_image_size[1])\n print 'Applying area resize.'\n if aug == 'random_crop_and_res_cube_plus' and im_size_check:\n im_shape = image.get_shape().as_list()\n im_shape[0] /= 4\n im_shape[1] /= 4\n image = resize_image_label(\n im=image,\n model_input_image_size=im_shape[:2],\n f='bicubic')\n image = random_crop(image, model_input_image_size)\n if aug == 'center_crop_and_res_cube_plus' and im_size_check:\n im_shape = image.get_shape().as_list()\n im_shape[0] /= 4\n im_shape[1] /= 4\n image = resize_image_label(\n im=image,\n model_input_image_size=im_shape[:2],\n f='bicubic')\n image = center_crop(image, model_input_image_size)\n if aug == 'res_and_crop' and im_size_check:\n model_input_image_size_1 = np.asarray(\n model_input_image_size[:2]) + 28\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size_1,\n f='area')\n image = center_crop(image, model_input_image_size)\n print 'Applying area resize.'\n if aug == 'res_nn' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='nearest')\n print 'Applying nearest resize.'\n if aug == 'res_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='bicubic')\n label = resize_image_label(\n im=label,\n model_input_image_size=model_input_image_size,\n f='bicubic')\n print 'Applying bilinear resize.'\n if aug == 'res_nn_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='nearest')\n label = resize_image_label(\n im=label,\n model_input_image_size=model_input_image_size,\n f='nearest')\n print 'Applying nearest resize.'\n if aug == 'left_right':\n image = image_flip(image, direction='left_right')\n print 'Applying random flip left-right.'\n if aug == 'up_down':\n image = image_flip(image, direction='up_down')\n print 'Applying random flip up-down.'\n if aug == 'lr_viz_flip':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = lr_viz_flip(image, label)\n image, label = ud_viz_flip(image, label)\n if aug == 
'lr_flip_image_label':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = lr_flip_image_label(image, label)\n if aug == 'ud_flip_image_label':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = ud_flip_image_label(image, label)\n if aug == 'gratings_modulate':\n modulate = 10\n image //= modulate\n offset = (255 / 2) - ((255 / modulate) / 2)\n image += offset\n if aug == 'gaussian_noise':\n im_shape = image.get_shape().as_list()\n assert len(im_shape) == 3, '4D not implemented yet.'\n sigma = 1. / 10.\n mu = 0.\n image = image + tf.random_normal(\n im_shape,\n mean=mu,\n stddev=sigma)\n print 'Applying gaussian noise.'\n if aug == 'gaussian_noise_small':\n im_shape = image.get_shape().as_list()\n assert len(im_shape) == 3, '4D not implemented yet.'\n sigma = 1. / 20.\n mu = 0.\n image = image + tf.random_normal(\n im_shape,\n mean=mu,\n stddev=sigma)\n print 'Applying gaussian noise.'\n if aug == 'mixup':\n raise RuntimeError('Mixup not properly implemented yet.')\n alpha = 0.4\n dist = tf.distributions.Beta(alpha, alpha)\n image = image * dist + (1 - dist) * tf.roll(image, 0, 1)\n label = label * dist + (1 - dist) * tf.roll(label, 0, 1)\n if aug == 'hed_brightness':\n image = tf.image.random_brightness(image, 63)\n if aug == 'hed_contrast':\n image = tf.image.random_contrast(image, lower=0.4, upper=1.5)\n if aug == 'blur_labels':\n label = tf_blur(\n image=label,\n kernel_size=3, # extent\n name='label_blur',\n normalize=True,\n sigma=1.)\n if aug == 'calculate_rate_time_crop':\n im_shape = image.get_shape().as_list()\n minval = im_shape[0] // 3\n time_crop = tf.random_uniform(\n [],\n minval=minval,\n maxval=im_shape[0],\n dtype=tf.int32)\n\n # For now always pull from the beginning\n indices = tf.range(0, time_crop, dtype=tf.int32)\n selected_image = tf.gather(image, indices)\n padded_image = tf.zeros(\n [im_shape[0] - time_crop] + im_shape[1:],\n dtype=selected_image.dtype)\n\n # Randomly concatenate pad to front or back\n image = tf.cond(\n pred=tf.greater(\n tf.random_uniform(\n [],\n minval=0,\n maxval=1,\n dtype=tf.float32),\n 0.5),\n true_fn=lambda: tf.concat(\n [selected_image, padded_image], axis=0),\n false_fn=lambda: tf.concat(\n [padded_image, selected_image], axis=0)\n )\n image.set_shape(im_shape)\n\n # Convert label to rate\n label = label / im_shape[0]\n if aug == 'calculate_rate':\n label = label / image.get_shape().as_list()[0]\n print 'Applying rate transformation.'\n if aug == 'threshold':\n image = tf.cast(tf.greater(image, 0.1), tf.float32)\n print 'Applying threshold.'\n if aug == 'nonzero_label':\n label = tf.cast(tf.greater(label, 0.2), tf.float32)\n print 'Applying threshold.'\n if aug == 'zero_one':\n image = tf.minimum(tf.maximum(image, 0.), 1.)\n print 'Applying threshold.'\n if aug == 'timestep_duplication':\n image = tf.stack([image for iid in range(7)])\n print 'Applying timestep duplication.'\n if aug == 'per_image_standardization':\n image = tf.image.per_image_standardization(image)\n print 'Applying per-image zscore.'\n if aug == 'flip_image_polarity':\n image = tf.abs(image - 1.)\n if aug == 'flip_label_polarity':\n label = tf.abs(label - 1.)\n if aug == 'NCHW':\n image = tf.transpose(image, (2, 0, 1))\n if aug == 'bfloat16_image':\n image = tf.cast(image, tf.bfloat16)\n if aug == 'bfloat16_label':\n label = tf.cast(label, tf.bfloat16)\n if aug == 'hfloat16_image':\n image = tf.cast(image, tf.float16)\n if aug == 'hfloat16_label':\n label = tf.cast(label, tf.float16)\n if aug == 
'threshold_label':\n label = tf.cast(tf.greater(label, 0.999), tf.float32)\n print 'Applying threshold of 0.999 to the label.'\n if aug == 'threshold_label_255':\n # cABC label = tf.cast(tf.greater(label, 200), tf.float32)\n label = tf.cast(tf.greater(label, 10), tf.float32)\n print 'Applying threshold of 127.5 to the label.'\n if aug == 'normalize_label':\n label = tf.cast(label, tf.float32)\n label = label / tf.reduce_max(label) # tf.cast(tf.greater(label, 25), tf.float32)\n print 'Normalizing label to [0, 1].'\n if aug == 'scale_to_255':\n image = image * 255.\n if aug == 'clip_255':\n image = tf.maximum(tf.minimum(255., image), 0.)\n # else:\n # assert len(image.get_shape()) == 3, '4D not implemented yet.'\n # image = tf.image.resize_image_with_crop_or_pad(\n # image, model_input_image_size[0], model_input_image_size[1])\n return image, label",
"def __enhance_image(self, img):\n\n blue = self.g.clahe.apply(img[:,:,0])\n green = self.g.clahe.apply(img[:,:,1])\n red = self.g.clahe.apply(img[:,:,2])\n img[:,:,0] = blue\n img[:,:,1] = green\n img[:,:,2] = red\n return img",
"def preprocess_image(self, batched_inputs):\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images_aug = [x[\"image_color\"].to(self.device) for x in batched_inputs]\n\n images = [self.normalizer(x) for x in images]\n images_aug = [self.normalizer(x) for x in images_aug]\n\n images = ImageList.from_tensors(images,\n self.backbone.size_divisibility)\n images_aug = ImageList.from_tensors(images_aug,\n self.backbone.size_divisibility)\n return images, images_aug",
"def colorize_images(self, img):\n self.load_model()\n self.mdn.eval()\n self.vae.eval()\n n, _, _ = img.shape\n img = img.astype(np.float32) / 255\n img = torch.tensor(img, dtype=torch.float, device=self.device).unsqueeze(1)\n with torch.no_grad():\n z = self.mdn(img)\n ab_out = self.vae.decode(z)\n lab_out = torch.cat((img, ab_out), dim=1)\n lab_out = self.unnormalize(lab_out).cpu().numpy()\n lab_out = np.transpose(lab_out, (0, 2, 3, 1)).astype(np.uint8)\n for i in range(n):\n color_out = cv2.cvtColor(lab_out[i], cv2.COLOR_LAB2BGR)\n color_out = cv2.resize(color_out, (96, 96), interpolation=cv2.INTER_AREA)\n cv2.imwrite(\"../datasets/stl10/divcolor/{}.png\".format(str(i)), color_out)\n return",
"def cmp_data_aug_image(train_dataset, train_dir):\n target_class = random.choice(train_dataset.class_names)\n target_dir = train_dir + '/' + target_class\n random_image = random.choice(os.listdir(target_dir))\n random_image_path = target_dir + '/' + random_image\n print(random_image_path)\n\n # Read and plot in the random image\n img = mpimg.imread(random_image_path)\n plt.imshow(img)\n plt.title(f\"Original Image from class: {target_class}\")\n plt.axis(False)\n\n # Now let's plot our augmented random image\n augmented_img = data_augmentation(tf.expand_dims(img, axis=0))\n plt.figure()\n plt.imshow(tf.squeeze(augmented_img/255.)) #Invalid shape (1, 553, 440, 3) for image data - squeezed after getting this error\n plt.title(f\"Augmented Image from class: {target_class}\")\n plt.axis(False)",
"def prepare(img,\n resize=False, new_size=(64, 64),\n apply_contrast=False, contrast_channels=(0, 1, 2)\n ):\n new_img = crop(img)\n\n if resize:\n new_img = cv2.resize(new_img, new_size)\n\n if apply_contrast:\n new_img = increase_contrast(new_img, channels=contrast_channels)\n\n return new_img",
"def _adjust_color_img(self, results, factor=1.0):\n for key in results.get('img_fields', ['image']):\n # NOTE defaultly the image should be BGR format\n img = results[key]\n results[key] = mmcv.adjust_color(img, factor).astype(img.dtype)",
"def colorize(image, newColor):\n image = image.copy()\n\n # zero out RGB values\n image.fill((0, 0, 0, 255), None, pg.BLEND_RGBA_MULT)\n # add in new RGB values\n image.fill(newColor[0:3] + (0,), None, pg.BLEND_RGBA_ADD)\n\n return image",
"def __call__(self, in_data):\n # There are five data augmentation steps\n # 1. Color augmentation\n # 2. Random expansion\n # 3. Random cropping\n # 4. Resizing with random interpolation\n # 5. Random horizontal flipping\n if self.count % 10 == 0 and self.count % self.batchsize == 0 and self.count != 0:\n self.i += 1\n i = self.i % len(self.dim)\n self.output_shape = (self.dim[i], self.dim[i])\n # print(self.count, self.i, self.output_shape)\n self.count += 1\n\n img, bbox, label = in_data\n\n # 1. Color augmentation\n img = random_distort(img, brightness_delta=32,\n contrast_low=0.5, contrast_high=1.5,\n saturation_low=0.5, saturation_high=1.5,\n hue_delta=25)\n\n # Normalize. range is [0, 1]\n img /= 255.0\n\n _, H, W = img.shape\n scale = np.random.uniform(0.25, 2)\n random_expand = np.random.uniform(0.8, 1.2, 2)\n net_h, net_w = self.output_shape\n out_h = net_h * scale # random_expand[0]\n out_w = net_w * scale # random_expand[1]\n if H > W:\n out_w = out_h * (float(W) / H) * np.random.uniform(0.8, 1.2)\n elif H < W:\n out_h = out_w * (float(H) / W) * np.random.uniform(0.8, 1.2)\n\n out_h = int(out_h)\n out_w = int(out_w)\n\n img = resize_with_random_interpolation(img, (out_h, out_w))\n bbox = transforms.resize_bbox(bbox, (H, W), (out_h, out_w))\n\n if out_h < net_h and out_w < net_w:\n img, param = expand(img, out_h=net_h, out_w=net_w,\n fill=self.value, return_param=True)\n bbox = transforms.translate_bbox(\n bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])\n else:\n out_h = net_h if net_h > out_h else int(out_h * 1.05)\n out_w = net_w if net_w > out_w else int(out_w * 1.05)\n img, param = expand(img, out_h=out_h, out_w=out_w,\n fill=self.value, return_param=True)\n bbox = transforms.translate_bbox(\n bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])\n\n img, param = crop_with_bbox_constraints(\n img, bbox, return_param=True,\n crop_height=net_h, crop_width=net_w)\n bbox, param = transforms.crop_bbox(\n bbox, y_slice=param['y_slice'], x_slice=param['x_slice'],\n allow_outside_center=False, return_param=True)\n label = label[param['index']]\n\n\n # 5. Random horizontal flipping # OK\n img, params = transforms.random_flip(\n img, x_random=True, return_param=True)\n bbox = transforms.flip_bbox(\n bbox, self.output_shape, x_flip=params['x_flip'])\n\n # Preparation for Yolov2 network\n bbox[:, ::2] /= self.output_shape[0] # y\n bbox[:, 1::2] /= self.output_shape[1] # x\n\n num_bbox = len(bbox)\n len_max = max(num_bbox, self.max_target)\n\n gmap = create_map_anchor_gt(bbox, self.anchors, self.output_shape,\n self.downscale, self.n_boxes, len_max)\n\n out_bbox = np.zeros((len_max, 4), dtype='f')\n out_bbox[:num_bbox] = bbox[:num_bbox]\n out_label = np.zeros((len_max), dtype='i')\n out_label[:num_bbox] = label\n\n gmap = gmap[:self.max_target]\n out_bbox = out_bbox[:self.max_target]\n out_label = out_label[:self.max_target]\n num_array = min(num_bbox, self.max_target)\n\n img = np.clip(img, 0, 1)\n return img, out_bbox, out_label, gmap, np.array([num_array], dtype='i')",
"def preprocess_train(im, boxes, classes, inst_masks, mask, input_size, min_size=2,\n use_augment=False, training_scale=[0.3, 0.5, 0.7, 1.0]):\n ori_im = np.copy(im)\n target_h, target_w = input_size\n\n # ---------- old data_augmentation ----------\n if use_augment:\n if np.random.choice([0, 1]) != 0:\n scale = np.random.choice(training_scale) # adding more small objects\n im, inst_masks, mask, boxes, classes = random_scale(im, inst_masks, mask, boxes, classes, scale=scale)\n min_obj_cover = np.random.choice([0.8, 0.9, 1.0])\n # truncted examples may lead to multiple-detections..\n im, inst_masks, mask, boxes, classes = random_aspect_ratio(im, inst_masks, mask, boxes, classes,\n min_aspect_ratio=0.5, max_aspect_ratio=2.0,\n min_obj_cover=min_obj_cover)\n #\n # # r = np.random.randint(0, 3)\n # if np.random.rand() < 0.75:\n # im, inst_masks, mask, boxes, classes = fixed_scale(im, inst_masks, mask, boxes, classes, target_h, target_w)\n # else:\n # im, inst_masks, mask, boxes, classes = center_crop2fixed_pad(im, inst_masks, mask, boxes, classes, target_w, target_h,\n # min_size=min_size)\n\n # ---------- old data_augmentation ----------\n\n # ---------- none data_augmentation ----------\n im, inst_masks, mask, boxes, classes = fixed_scale(im, inst_masks, mask, boxes, classes, target_h, target_w)\n im, inst_masks, mask, boxes, classes = random_flip(im, inst_masks, mask, boxes, classes)\n # ---------- none data_augmentation ----------\n\n # ---------- old data_augmentation ----------\n im = distort_color(im)\n # ---------- old data_augmentation ----------\n\n im = imcv2_recolor(im)\n\n # add this because zeros numpy array will cause errors in torch Dataloader\n inst_masks = np.zeros([1, target_h, target_w], dtype=inst_masks.dtype) if inst_masks.size == 0 else inst_masks\n\n boxes = np.asarray(boxes, dtype=np.float32)\n return im, boxes, classes, inst_masks, mask, ori_im",
"def autoscale_colors_for_png(arr, vmin=None, vmax=None):\n image_mode = _get_image_type_from_array(arr)\n\n if vmin is None:\n vmin = np.min(arr)\n if vmax is None:\n vmax = np.max(arr)\n\n # In cases where all elements are the same, fix the vmax so that even though\n # the whole image will be black, the user can at least see the shape\n if vmin == vmax:\n vmax = vmin + 1\n\n scaled = scale_colors_for_png(arr, vmin=vmin, vmax=vmax)\n return scaled, image_mode",
"def preprocess(\n self,\n images: ImageInput,\n do_resize: bool = None,\n size: Dict[str, int] = None,\n resample: PILImageResampling = None,\n do_normalize: bool = None,\n do_color_quantize: Optional[bool] = None,\n clusters: Optional[Union[List[List[int]], np.ndarray]] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> PIL.Image.Image:\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n size = get_size_dict(size)\n resample = resample if resample is not None else self.resample\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize\n clusters = clusters if clusters is not None else self.clusters\n clusters = np.array(clusters)\n\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n\n if do_resize and size is None or resample is None:\n raise ValueError(\"Size and resample must be specified if do_resize is True.\")\n\n if do_color_quantize and clusters is None:\n raise ValueError(\"Clusters must be specified if do_color_quantize is True.\")\n\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_normalize:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. If you wish to do this, \"\n \"make sure to set `do_normalize` to `False` and that pixel values are between [-1, 1].\",\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n if do_resize:\n images = [\n self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_normalize:\n images = [self.normalize(image=image, input_data_format=input_data_format) for image in images]\n\n if do_color_quantize:\n images = [to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) for image in images]\n # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)\n images = np.array(images)\n images = color_quantize(images, clusters).reshape(images.shape[:-1])\n\n # flatten to (batch_size, height*width)\n batch_size = images.shape[0]\n images = images.reshape(batch_size, -1)\n\n # We need to convert back to a list of images to keep consistent behaviour across processors.\n images = list(images)\n else:\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n for image in images\n ]\n\n data = {\"input_ids\": images}\n return BatchFeature(data=data, tensor_type=return_tensors)",
"def image_augmentation(dataset_dict):\n dataset_dict = copy.deepcopy(dataset_dict)\n image = utils.read_image(dataset_dict[\"file_name\"], format=\"BGR\")\n\n transform_list = [\n T.RandomCrop(crop_type=\"relative_range\", crop_size=[0.95, 0.87]),\n T.RandomBrightness(0.9, 1.5),\n T.RandomContrast(0.8, 1.6),\n T.RandomSaturation(1.0, 1.6),\n T.RandomRotation(angle=[15, 0, 5, 6, 15], expand=False),\n T.RandomFlip(prob=0.5, horizontal=True, vertical=False),\n T.ResizeScale(1.0, 2.0, target_height=900, target_width=700)\n ]\n\n image, transforms = T.apply_transform_gens(transform_list, image)\n dataset_dict[\"image\"] = torch.as_tensor(image.transpose(2, 0, 1).astype(\"float32\"))\n\n annotations = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n instances = utils.annotations_to_instances(annotations, image.shape[:2])\n dataset_dict[\"instances\"] = utils.filter_empty_instances(instances)\n\n return dataset_dict",
"def augmentation_nine(filename, aug_type, max_H, max_W, folder=CONFIG.data_folder):\r\n\r\n # image = rgb2grey(mpimg.imread(os.path.join(folder, filename)))\r\n\r\n # rotating a 214 pixel image by 2 deg yield 8 more pixels\r\n image_augmented = np.ones(shape=(max_H, max_W))\r\n image = Image.open(os.path.join(folder, filename))\r\n image = image.convert('RGB')\r\n # note that Image read rgb imgs as 0-255\r\n #################################\r\n # aug_type = 8\r\n\r\n w_ori, h_ori = image.size\r\n\r\n rotate_ind = aug_type % 3\r\n scale_ind = aug_type // 3\r\n\r\n image = PIL.ImageOps.invert(image)\r\n if rotate_ind == 1:\r\n image = image.rotate(2, expand=True)\r\n elif rotate_ind == 2:\r\n image = image.rotate(-2, expand=True)\r\n image = PIL.ImageOps.invert(image)\r\n\r\n h, w = image.size\r\n\r\n if scale_ind == 1:\r\n h, w = np.int(np.floor(h * 0.98)), np.int(np.floor(w * 0.98))\r\n image = image.resize((h, w))\r\n elif scale_ind == 2:\r\n h, w = np.int(np.floor(h * 0.96)), np.int(np.floor(w * 0.96))\r\n image = image.resize((h, w))\r\n\r\n # put image there. 9 images in total. this enhalts shifting.\r\n # scale to (0-1)\r\n image = rgb2grey(np.array(image) / 255)\r\n\r\n h, w = np.shape(image)\r\n\r\n stride_0, stride_1 = (max_H - 10 - h_ori) // 2, (max_W - 10 - w_ori) // 2\r\n offset = ((aug_type % 3) * stride_0, (aug_type % 3) * stride_1)\r\n try:\r\n image_augmented[offset[0]: h + offset[0], offset[1]: w + offset[1]] = image\r\n except ValueError:\r\n print(filename)\r\n\r\n return image_augmented",
"def colorize(image, newColor):\n image = image.copy()\n\n # zero out RGB values\n image.fill((0, 0, 0, 255), None, pygame.BLEND_RGBA_MULT)\n # add in new RGB values\n image.fill(newColor[0:3] + [0,], None, pygame.BLEND_RGBA_ADD)\n\n return image",
"def img_augmentation(augmentation, img, bbox):\n\n # img_copy = img.copy()\n image_shape = img.shape\n h, w = image_shape[0:2]\n\n # Convert the stochastic sequence of augmenters to a deterministic one.\n # The deterministic sequence will always apply the exactly same effects to the images.\n det = augmentation.to_deterministic()\n img_aug = det.augment_image(img)\n\n ia_bbox = list()\n for bounding_box in bbox:\n x1, y1, x2, y2 = bounding_box\n ia_bbox.append(ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2))\n\n bbs = ia.BoundingBoxesOnImage(ia_bbox, shape=image_shape)\n bbs_aug = det.augment_bounding_boxes([bbs])[0]\n # img = bbs_aug.draw_on_image(img)\n\n after_bbox = list()\n for bounding_box in bbs_aug.bounding_boxes:\n bbox_list = [bounding_box.x1_int, bounding_box.y1_int, bounding_box.x2_int, bounding_box.y2_int]\n\n if bbox_list[0] >= w: bbox_list[0] = w - 1\n if bbox_list[1] >= h: bbox_list[1] = h - 1\n if bbox_list[2] >= w: bbox_list[2] = w - 1\n if bbox_list[3] >= h: bbox_list[3] = h - 1\n\n if bbox_list[0] == bbox_list[2] or bbox_list[1] == bbox_list[3]:\n return img_augmentation(augmentation, img, bbox)\n\n bbox_list = list(map(lambda x: max(x, 0), bbox_list))\n after_bbox.append(bbox_list)\n\n assert img_aug.shape == image_shape, \"Augmentation shouldn't change image size\"\n\n return img_aug, after_bbox",
"def placeholder_image_augmentations(\n images,\n model_input_image_size,\n data_augmentations,\n batch_size,\n labels=None,\n aug_lab=False):\n split_images = tf.split(images, batch_size, axis=0)\n if labels is not None:\n split_labels = tf.split(labels, batch_size, axis=0)\n else:\n split_labels = [None] * batch_size\n aug_images, aug_labels = [], []\n for idx in range(batch_size):\n if aug_lab:\n aug_image, aug_label = image_augmentations(\n image=tf.squeeze(split_images[idx], axis=0),\n data_augmentations=data_augmentations,\n model_input_image_size=model_input_image_size,\n label=tf.squeeze(split_labels[idx], axis=0))\n else:\n aug_image, _ = image_augmentations(\n image=tf.squeeze(split_images[idx], axis=0),\n data_augmentations=data_augmentations,\n model_input_image_size=model_input_image_size)\n aug_label = split_labels[idx]\n aug_images += [aug_image]\n aug_labels += [aug_label]\n return tf.stack(aug_images), tf.stack(aug_labels)",
"def image_resize(img, min_size=600, max_size=1000):\n C, H, W = img.shape\n scale1 = min_size / min(H, W)\n scale2 = max_size / max(H, W)\n scale = min(scale1, scale2)\n img = img / 255\n img = transform.resize(img, (C, H * scale, W * scale),\n mode='reflect', anti_aliasing=False)\n # img = pytorch_normalize(img)\n # img = caffe_normalize(img)\n return img",
"def combine_images(args):\n\n # Read all images into a cube (TODO: think about the RAM)\n with fits.open(args.input[0]) as im0:\n lx, ly = im0[0].data.shape\n ref_hdr = im0[0].header\n\n headers = [fits.open(im_name)[0].header for im_name in args.input]\n cube = numpy.ma.zeros((len(args.input), lx, ly))\n cube.mask = numpy.zeros_like(cube.data)\n for ii, im_name in enumerate(args.input):\n with astroim.Astroim(im_name) as im:\n cube.data[ii, :,:] = im.chips[0].data\n if im.chips[0].mask is not None:\n cube.mask[ii,:,:] = im.chips[0].mask\n\n # Scale images\n scale_functions = {\"median\": numpy.ma.median,\n \"mean\": numpy.ma.mean,\n \"mode\": scipy.stats.mstats.mode,\n \"none\": lambda x: 1}\n for ii, im_name in enumerate(args.input):\n func = scale_functions[args.scale.lower()]\n cube[ii,:,:] /= func(cube[ii,:,:])\n\n\n # Reproject all images to the ref_hdr\n for ii, _ in enumerate(args.input):\n if ii == 0:\n continue\n cube.data[ii,:,:], footprint = reproject_interp((cube.data[ii,:,:], headers[ii]), ref_hdr)\n cube.mask[ii,:,:], footprint = reproject_interp((cube.mask[ii,:,:], headers[ii]), ref_hdr)\n #whr = numpy.isnan(cube.data[ii,:,:])\n #cube.mask[ii,:,:][whr] = True\n\n # Do average\n average_functions = {\"median\": numpy.ma.median, \"mean\": numpy.ma.mean, \"sum\": numpy.ma.sum}\n func = average_functions[args.average.lower()]\n final_image = func(cube, axis=0)\n ref_hdr[\"NCOMBINE\"] = len(args.input)\n\n mask_name = utilities.replace_extension(args.output, \".fits.msk\")\n mask_name_header = utilities.replace_extension(os.path.basename(args.output), \".fits.msk\")\n ref_hdr[\"MASK\"] = mask_name_header\n fits.writeto(args.output, final_image.data, ref_hdr, clobber=True )\n fits.writeto(mask_name, numpy.array(final_image.mask, dtype=int), clobber=True)\n\n return args.output",
"def get_augmenter():\n\n augmenter = iaa.Sequential([\n iaa.Fliplr(0.5), # horizontal flips\n iaa.Crop(percent=(0, 0.1)), # random crops\n # Small gaussian blur with random sigma between 0 and 0.5.\n # But we only blur about 50% of all images.\n iaa.Sometimes(\n 0.5,\n iaa.GaussianBlur(sigma=(0, 0.5))\n ),\n # Strengthen or weaken the contrast in each image.\n iaa.LinearContrast((0.75, 1.5)),\n # Add gaussian noise.\n # For 50% of all images, we sample the noise once per pixel.\n # For the other 50% of all images, we sample the noise per pixel AND\n # channel. This can change the color (not only brightness) of the\n # pixels.\n iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),\n # Make some images brighter and some darker.\n # In 20% of all cases, we sample the multiplier once per channel,\n # which can end up changing the color of the images.\n iaa.Multiply((0.8, 1.2), per_channel=0.2),\n # Apply affine transformations to each image.\n # Scale/zoom them, translate/move them, rotate them and shear them.\n iaa.Affine(\n scale={\"x\": (0.80, 1.2), \"y\": (0.80, 1.2)},\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)},\n rotate=(-25, 25),\n shear=(-6, 6)\n )\n], random_order=True) # apply augmenters in random order\n\n return augmenter",
"def imgProcessing(self):\n if (self.image_width > 320):\n self.cv_image = imutils.resize(self.cv_image, width = 320)\n else:\n pass\n\n \"\"\" optional -- image-mirrored \"\"\"\n # self.cv_image = cv2.flip(self.cv_image, 1)",
"def augmentation_simple(filename, aug_type, max_H, max_W, folder=CONFIG.data_folder):\r\n\r\n image = rgb2grey(mpimg.imread(os.path.join(folder, filename)))\r\n image_augmented = np.ones(shape=(max_H, max_W))\r\n (h, w) = np.shape(image)\r\n stride_0, stride_1 = max_H - h, (max_W - w) // 2\r\n offset = ((aug_type % 2) * stride_0, (aug_type % 3) * stride_1)\r\n image_augmented[offset[0]: h + offset[0], offset[1]: w + offset[1]] = image\r\n\r\n return image_augmented",
"def create_colorful_test_image(self):\n ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)\n ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)\n ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)\n imr = np.concatenate((ch255, ch128, ch128), axis=2)\n img = np.concatenate((ch255, ch255, ch0), axis=2)\n imb = np.concatenate((ch255, ch0, ch255), axis=2)\n imw = np.concatenate((ch128, ch128, ch128), axis=2)\n imu = np.concatenate((imr, img), axis=1)\n imd = np.concatenate((imb, imw), axis=1)\n image = np.concatenate((imu, imd), axis=0)\n return image",
"def _update_retain_original_image_additional_channels(\n eval_config,\n retain_original_image_additional_channels):\n eval_config.retain_original_image_additional_channels = (\n retain_original_image_additional_channels)",
"def transform_images(img, size):\n return tf.image.resize(img, (size, size)) / 255",
"def image_network_train(learn_data_path):\n\n # data selector ----------\n use_da_data = False\n increase_val = False\n print( \"\\nmode: Use Augmented data: {} | increase validation data: {}\".format(use_da_data, increase_val) )\n\n # First define original train_data only as train_dir\n train_dir = os.path.join(data_dir, \"train\")\n if (use_da_data == True) and (increase_val == False):\n # with_augmented data (no validation increase)\n train_dir = os.path.join(data_dir, \"train_with_aug\")\n validation_dir = os.path.join(data_dir, \"val\") # original validation data\n\n # pair of decreaced train_data and increased validation data\n if (increase_val == True):\n train_dir = os.path.join(data_dir, \"red_train\")\n if (use_da_data == True):\n train_dir = os.path.join(data_dir, \"red_train_with_aug\")\n validation_dir = os.path.join(data_dir, \"validation\")\n\n test_dir = os.path.join(data_dir, \"test\")\n\n print(\"\\ntrain_dir: \", train_dir)\n print(\"validation_dir: \", validation_dir)\n\n\n # calcucate the num of category\n num_category = 0\n for dirpath, dirnames, filenames in os.walk(train_dir):\n for dirname in dirnames:\n num_category += 1\n\n # All images will be resized to 299x299\n image_size = 299\n batch_size = 16\n\n # Rescale all images by 1./255 and apply image augmentation\n train_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1./255)\n validation_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1./255)\n test_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1./255)\n\n # Flow training images in batches of using train_datagen generator\n train_generator = train_datagen.flow_from_directory(\n train_dir, # Source directory for the training images\n target_size=(image_size, image_size),\n batch_size=batch_size,\n class_mode='categorical')\n\n # Flow validation images in batches of 20 using validation_datagen generator\n validation_generator = validation_datagen.flow_from_directory(\n validation_dir, # Source directory for the validation images\n target_size=(image_size, image_size),\n batch_size=batch_size,\n class_mode='categorical')\n\n # Flow validation images in batches of 20 using test_datagen generator\n test_generator = test_datagen.flow_from_directory(\n test_dir, # Source directory for the test images\n target_size=(image_size, image_size),\n batch_size=batch_size,\n class_mode='categorical')\n\n # Create the base model from the pre-trained convnets\n IMG_SHAPE = (image_size, image_size, 3)\n\n # Create the base model from the pre-trained model MobileNet V2\n base_model = keras.applications.xception.Xception(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')\n\n # Freeze the convolutional base\n base_model.trainable = False\n\n # モデル\n model = keras.Sequential([\n base_model,\n keras.layers.GlobalAveragePooling2D(),\n keras.layers.Dense(num_category, activation='softmax')\n ])\n\n # Compile the model\n model.compile(optimizer=keras.optimizers.Adam(lr=0.0001),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n # early stopping\n es = keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n\n model.summary()\n\n # 更新される重みの数\n print('after', len(model.trainable_weights))\n\n # Train the model\n epochs = 30\n steps_per_epoch = train_generator.n // batch_size\n validation_steps = validation_generator.n // batch_size\n test_steps = test_generator.n // batch_size\n\n history = model.fit_generator(train_generator,\n steps_per_epoch = steps_per_epoch,\n epochs=epochs,\n 
workers=4,\n validation_data=validation_generator,\n validation_steps=validation_steps,\n callbacks=[es],\n class_weight={0:1.0, 1:0.4})\n\n loss, acc = model.evaluate_generator(validation_generator, steps=validation_steps)\n print('val loss: {}, val acc: {}'.format(loss, acc))\n\n # Fine tuning\n # Un-freeze the top layers of the model\n base_model.trainable = True\n\n # The nums of layers are in the base model\n print(\"Number of layers in the base model: \", len(base_model.layers))\n\n # Fine tune from this layer onwards\n fine_tune_at = 108\n\n # Freeze all the layers before the `fine_tune_at` layer\n for layer in base_model.layers[:fine_tune_at]:\n layer.trainable = False\n\n # Compile the model using a much-lower training rate\n model.compile(optimizer = keras.optimizers.Adam(lr=2e-5),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n model.summary()\n\n # 更新される重みの数\n print('after Fine tune', len(model.trainable_weights))\n\n # Continue Train the model\n history_fine = model.fit_generator(train_generator,\n steps_per_epoch = steps_per_epoch,\n epochs=epochs,\n workers=4,\n validation_data=validation_generator,\n validation_steps=validation_steps,\n callbacks=[es],\n class_weight={0:1.0, 1:0.4})\n\n\n # print(history_fine.history)\n model_val_acc = history_fine.history['val_accuracy'][-1]\n print('val_acc: ', model_val_acc)\n\n # save model into hdf5 file ----------\n model.save(learn_data_path + '/shen_model.h5')\n\n loss, acc = model.evaluate_generator(validation_generator, steps=validation_steps)\n print('val loss: {}, val acc: {}'.format(loss, acc))\n\n loss, acc = model.evaluate_generator(test_generator, steps=test_steps)\n print('Test loss: {}, Test acc: {}'.format(loss, acc))",
"def rescale_and_resize_image(img_name, img_size, save_img):\n \n try:\n \n # Open image\n img = Image.open(img_name)\n \n # Resize image\n img = img.resize((int(img_size), int(img_size)), Image.ANTIALIAS) \n \n # Get data from image\n img_list = list(img.getdata())\n \n # Find minimum and maximum value pixels in the image\n img_max = max(img_list)\n img_min = min(img_list)\n \n # Determine factor to scale to a 8-bit image\n scale_factor = 255.0/(img_max - img_min)\n \n img_list_new = [0] * img_size * img_size\n \n # Rescale all pixels to the range 0 to 255 (in line with unit8 values)\n for i in range(0,img_size):\n for j in range(0,img_size):\n img_list_new[i*img_size + j] = int((img_list[i*img_size + j]-img_min)*scale_factor)\n if (img_list_new[i*img_size + j] > 255) or (img_list_new[i*img_size + j] < 0) or (img_list_new[i*img_size + j]-int(img_list_new[i*img_size + j]) != 0):\n print(\"img_list_new[%d][%d] = %r\" % (i,j,img_list_new[i*img_size + j]))\n \n img.putdata(img_list_new)\n \n # Convert to uint8 greyscale image\n img = img.convert('L')\n \n # Save image\n if save_img:\n img.save(img_name)\n else:\n ph = ImageTk.PhotoImage(img)\n return ph\n \n finally:\n \n # Close image\n img.close()",
"def rescale_images(original_images):\n mobile_net_possible_dims = [128, 160, 192, 224]\n dim_goal = 128\n \n for dim in mobile_net_possible_dims:\n if original_images.shape[1] <= dim:\n dim_goal = dim\n break;\n print(f\"Image rescaled from dimension {original_images.shape[1]} to {dim_goal} for MobileNet\")\n scale = dim_goal/original_images.shape[1]\n images = np.empty((original_images.shape[0], dim_goal, dim_goal))\n for i, original_image in enumerate(original_images):\n images[i] = rescale(original_image, (scale, scale), multichannel=False)\n return images",
"def greyscale_make_transforms(config):\n tf1_list = [] #transforms applied to original images\n tf2_list = [] #transforms applied to randomly augmented images\n tf3_list = [] #transforms applied at test time\n\n tf1_list += [torchvision.transforms.Resize(config.input_sz)]\n tf3_list += [torchvision.transforms.Resize(config.input_sz)]\n\n # random rotation\n if config.rot_val > 0:\n # 50-50 do rotation or not\n tf2_list += [torchvision.transforms.RandomApply([torchvision.transforms.RandomRotation(config.rot_val)], p=0.5)]\n\n #multiple crops\n if config.multiple_crop:\n imgs_tf_crops = []\n for tf2_crop_sz in config.crop_szs:\n tf2_crop_fn = torchvision.transforms.RandomChoice([\n torchvision.transforms.RandomCrop(tf2_crop_sz),\n torchvision.transforms.CenterCrop(tf2_crop_sz)\n ])\n imgs_tf_crops.append(tf2_crop_fn)\n tf2_list += [torchvision.transforms.RandomChoice(imgs_tf_crops)]\n \n tf2_list += [torchvision.transforms.Resize(tuple(np.array([config.input_sz, config.input_sz])))]\n tf2_list += [torchvision.transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.125)]\n #greyscaling\n tf1_list.append(custom_greyscale_to_tensor(config.include_rgb))\n tf2_list.append(custom_greyscale_to_tensor(config.include_rgb))\n tf3_list.append(custom_greyscale_to_tensor(config.include_rgb))\n\n tf1 = torchvision.transforms.Compose(tf1_list)\n tf2 = torchvision.transforms.Compose(tf2_list)\n tf3 = torchvision.transforms.Compose(tf3_list)\n return tf1, tf2, tf3"
] | [
"0.6309083",
"0.5947522",
"0.59078306",
"0.5813178",
"0.57557356",
"0.57447666",
"0.57365495",
"0.56254",
"0.5547125",
"0.54938376",
"0.54680014",
"0.5387826",
"0.534617",
"0.53367794",
"0.5330937",
"0.53264517",
"0.5302511",
"0.5274359",
"0.52589434",
"0.52519834",
"0.5238223",
"0.52331483",
"0.52126163",
"0.52118933",
"0.5202271",
"0.5189902",
"0.51806676",
"0.5163229",
"0.5146369",
"0.5105428"
] | 0.71007115 | 0 |
Create multiple sync instances from the config and filesystem info. | def create_all_sync_instances(self):
# Get directories to sync
dirs_to_sync_by_sync_instance = self.get_dirs_to_sync(self.config['sync_hierarchy_rules'])
# Store all known running sync instances here to potentially kill later
# unhandled_sync_instances = copy.deepcopy(dirs_to_sync_by_sync_instance)
unhandled_sync_instances = copy.deepcopy(self.data_storage.running_data)
# Loop through each entry in the dict and create a sync instance for it
for instance_name, dirs_to_sync in dirs_to_sync_by_sync_instance.items():
# Mark this instance as handled so it's not killed later
unhandled_sync_instances.pop(instance_name, None)
# Make new sync instance
self.create_sync_instance(instance_name, dirs_to_sync)
# Kill any instances in unhandled_sync_instances, because they are
        # no longer needed
for inst_to_kill in unhandled_sync_instances:
self.logger.debug(
"Cleaning up instance '" + inst_to_kill + "'" +
" which is no longer needed."
)
self.kill_sync_instance_by_pid(self.data_storage.running_data[inst_to_kill]['pid']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n self.create_all_sync_instances()",
"def create_sync_objects(self, subcloud_name, capabilities):\n\n endpoint_type_list = capabilities.get('endpoint_types', None)\n if endpoint_type_list:\n self.sync_objs[subcloud_name] = {}\n for endpoint_type in endpoint_type_list:\n LOG.info(\"Engine id:(%s) create %s/%s sync obj\" %\n (self.engine_id, subcloud_name, endpoint_type))\n sync_obj = sync_object_class_map[endpoint_type](subcloud_name,\n endpoint_type)\n self.sync_objs[subcloud_name].update({\n endpoint_type: sync_obj})",
"def sync_ready_files(self, sync_config=None):\n\n if sync_config is None:\n sync_config = config.getSettingJson(config.CONSENT_SYNC_BUCKETS)\n\n hpos_sync_config = sync_config['hpos']\n orgs_sync_config = sync_config['orgs']\n\n filters = {\n hpo_name: {\n 'exclude_types': [\n ConsentType(excluded_type_str) for excluded_type_str in options['exclude_types']\n ]\n }\n for hpo_name, options in hpos_sync_config.items()\n if 'exclude_types' in options\n }\n file_list: List[ConsentFile] = self.consent_dao.get_files_ready_to_sync(\n hpo_names=hpos_sync_config.keys(),\n org_names=orgs_sync_config.keys(),\n additional_filters=filters\n )\n\n pairing_info_map = self._build_participant_pairing_map(file_list)\n\n # Build out a FileSync for each possible PairingInfo\n sync_pairing_map: Dict[ParticipantPairingInfo, BaseFileSync] = {}\n for pairing_info in pairing_info_map.values():\n if pairing_info not in sync_pairing_map:\n org_config = orgs_sync_config.get(pairing_info.org_name)\n if org_config:\n config_data = org_config\n else:\n config_data = hpos_sync_config.get(pairing_info.hpo_name)\n\n if not config_data:\n # No need to build sync handlers for anything not in the config\n continue\n\n sync_pairing_map[pairing_info] = self._build_sync_handler(\n zip_files=config_data['zip_consents'],\n bucket=config_data['bucket'],\n pairing_info=pairing_info\n )\n\n for file in file_list:\n pairing_info = pairing_info_map.get(file.participant_id, None)\n if not pairing_info:\n # Skip files for unpaired participants\n continue\n\n # Retrieve the sync handler based on the pairing information\n file_group = sync_pairing_map.get(pairing_info)\n if not file_group:\n # Ignore participants paired to an org or hpo we aren't syncing files for\n continue\n\n file_group.files_to_sync.append(file)\n\n with self.consent_dao.session() as session:\n for file_group in sync_pairing_map.values():\n files_synced = file_group.sync_file_list()\n\n # Update the database after each group syncs so ones\n # that have succeeded so far get saved if a later one fails\n if len(files_synced):\n self.consent_dao.batch_update_consent_files(session=session, consent_files=files_synced)\n session.commit()\n\n # Queue tasks to rebuild consent metrics resource data records (for PDR)\n dispatch_rebuild_consent_metrics_tasks([file.id for file in files_synced])",
"def __init__(self,server_list):\n self.workers=[]\n self.worker_by_name={}\n worker_id = 1\n for host,port in server_list:\n # Add the uid here can help with port conflicts, but only works\n # on Unix clusters. We really need to work out a daemon service\n # model that makes the port mess transparent.\n port = port #+ os.getuid()\n new_worker = sync_cluster.standard_sync_client(host,port,worker_id)\n self.workers.append(new_worker)\n self.worker_by_name[host] = new_worker\n worker_id = worker_id + 1",
"def make_mock_fs(self):\n\t\ttemp = tempfile.mkdtemp(prefix=\"fpltest\")\n\t\ttry:\n\t\t\tconfig = fplsync.Config()\n\t\t\tconfig.playlist_source = os.path.join(temp, \"fb2k_playlists\")\n\t\t\tconfig.source = os.path.join(temp, \"source\")\n\t\t\tconfig.dest = os.path.join(temp, \"dest\")\n\t\t\tconfig.playlist_dest = os.path.join(temp, \"playlists\")\n\t\t\tos.mkdir(config.source)\n\t\t\twith open(os.path.join(config.source, \"a.mp3\"), \"w\") as f:\n\t\t\t\tprint(\"a\" * 1000, file=f, end=\"\")\n\t\t\twith open(os.path.join(config.source, \"b.mp3\"), \"w\") as f:\n\t\t\t\tprint(\"b\" * 1000, file=f, end=\"\")\n\t\t\twith open(os.path.join(config.source, \"c.mp3\"), \"w\") as f:\n\t\t\t\tprint(\"c\" * 1000, file=f, end=\"\")\n\t\t\tos.mkdir(config.dest)\n\t\t\tos.mkdir(config.playlist_source)\n\t\t\tos.mkdir(config.playlist_dest)\n\t\t\tyield config\n\t\tfinally:\n\t\t\tshutil.rmtree(temp)",
"def create_sync_instance(self, instance_name, dirs_to_sync):\n # TODO: check global config hash here too, not just instance-specific config\n self.logger.debug(\n \"Processing instance '\" + instance_name + \"' , deciding whether\" +\n \"to kill or not\"\n )\n\n # Obtain a hash of the requested config to be able to later check if\n # the instance should be killed and restarted or not.\n # This hash will be stored with the instance data, and if it changes,\n # the instance will be killed and restarted so that new config can be\n # applied.\n config_hash = hashlib.sha256((\n\n # Include the instance name in the config hash\n str(instance_name) +\n\n # Include the directories to sync in the config hash\n str(dirs_to_sync) +\n\n # Include the global config in the config hash\n str(self.config['global_unison_config_options'])\n\n ).encode('utf-8')).hexdigest()\n\n # Get data from requested instance, if there is any\n requested_instance = self.data_storage.get_data(instance_name)\n\n if requested_instance is None:\n\n # No instance data found, must start new one\n self.logger.info(\n \"Instance '\" + instance_name + \"' \" +\n \"No instance data found, starting new sync instance.\"\n )\n\n elif requested_instance['config_hash'] == config_hash:\n # Existing instance data found, still uses same config - no restart\n self.logger.debug(\n \"Instance '\" + instance_name + \"' \" +\n \"Instance data found, config still unchanged.\"\n )\n return False\n else:\n # Existing instance data found, but uses different config, so restarting\n self.logger.info(\n \"Instance '\" + instance_name + \"' \" +\n \"Instance data found, but config or directories to sync have\" +\n \" changed. Restarting instance.\"\n )\n\n self.kill_sync_instance_by_pid(requested_instance['pid'])\n self.data_storage.remove_data(requested_instance['syncname'])\n\n # Process dirs into a format for unison command line arguments\n dirs_for_unison = []\n trimmed_dirs = []\n amount_to_clip = (len(self.config['unison_local_root']) + 1)\n\n for dir in dirs_to_sync:\n\n # Clip off directory from local root\n dir_trimmed = dir[amount_to_clip:]\n\n # Format for unison command line args\n pathstr = \"-path=\" + dir_trimmed + \"\"\n\n # Append to list for args\n dirs_for_unison.append(pathstr)\n\n # Append to list for config storage\n trimmed_dirs.append(dir_trimmed)\n\n # Basic verification check (by no means complete)\n\n # Ensure local root exists\n if not os.path.isdir(self.config['unison_local_root']):\n raise IOError(\"Local root directory does not exist\")\n\n # Convert SSH config info into connection string\n remote_path_connection_string = (\n \"\" +\n \"ssh://\" +\n str(self.config['unison_remote_ssh_conn']) +\n \"/\" +\n str(self.config['unison_remote_root']) +\n \"\"\n )\n\n # todo: add '-label' here\n\n # print(remote_path_connection_string)\n\n # Check if SSH config key is specified\n if self.config['unison_remote_ssh_keyfile'] == \"\":\n # Key is not specified, don't use it\n # TODO: reformat this entry\n self.logger.debug(\"SSH key not specified\")\n\n else:\n # Key is specified\n # TODO: reformat this entry\n self.logger.debug(\"Key specified: \" + self.config['unison_remote_ssh_keyfile'])\n\n remote_path_connection_string = (\n remote_path_connection_string +\n \" -sshargs='-i \" +\n self.config['unison_remote_ssh_keyfile'] +\n \"'\"\n )\n\n # print(remote_path_connection_string)\n\n # Set env vars to pass to unison\n envvars = {\n 'UNISONLOCALHOSTNAME': self.config['unison_local_hostname'],\n 'HOME': 
self.config['unison_home_dir'],\n 'USER': self.config['unison_user'],\n 'LOGNAME': self.config['unison_user'],\n 'PWD': self.config['unison_home_dir'],\n }\n\n logfile = self.config['unison_log_dir'] + os.sep + instance_name + \".log\"\n self.touch(logfile)\n\n # Start unison\n cmd = (\n [self.config['unison_path']] +\n [\"\" + str(self.config['unison_local_root']) + \"\"] +\n [remote_path_connection_string] +\n [\"-label=unisonctrl-\" + instance_name] +\n dirs_for_unison +\n self.config['global_unison_config_options'] +\n [\"-log=true\"] +\n [\n \"-logfile=\" +\n logfile\n ]\n )\n\n # self.logger.info(\" \".join(cmd))\n\n running_instance_pid = subprocess.Popen(\n cmd,\n stdin=subprocess.DEVNULL,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL, # close_fds=True,\n env=envvars\n ).pid\n\n instance_info = {\n \"pid\": running_instance_pid,\n \"syncname\": instance_name,\n \"config_hash\": config_hash,\n \"dirs_to_sync\": trimmed_dirs\n }\n\n self.logger.info(\n \"New instance '\" + instance_name + \"' \" +\n \" (PID \" + str(instance_info['pid']) + \").\"\n )\n\n # Store instance info\n self.data_storage.set_data(instance_name, instance_info)\n\n # New instance was created, return true\n return True",
"def sync(self, **kwargs):\n volume_1 = kwargs['NAMES'][0]\n volume_2 = kwargs['NAMES'][1]\n path1 = f\"{self.cm.find_name(name=volume_1)[0]['path']}/{volume_1}/\"\n path2 = f\"{self.cm.find_name(name=volume_2)[0]['path']}/{volume_2}/\"\n os.system(f\"rsync -avzh {path2} {path1}\")\n kwargs1 = {'NAME': volume_1, 'key': \"sync_with\", 'value': volume_2}\n volume_info1 = self.add_tag(**kwargs1)\n result = [volume_info1]\n return result",
"def create_instances(self):\n disk_d = \"//\"+self.host+\"/d$\"\n mask = r\"^IBM$|^WebSphere.*\"\n root_flag = 0\n # print(os.listdir(disk_d)) #checkpoint\n for item in os.listdir(disk_d):\n searchObj = re.search(mask, item, re.M|re.I)\n if searchObj:\n root_flag = 1\n rootdir=disk_d+\"/\"+searchObj.group()\n # print(rootdir) #checkpoint\n\n if os.path.isdir(rootdir):\n candidates=os.listdir(rootdir)\n # print(candidates) #checkpoint\n for candidate in candidates:\n if os.path.isdir(rootdir+'/'+candidate+'/profiles'):\n user_install_root=rootdir+'/'+candidate\n candidate_instance=Instance(user_install_root)\n candidate_instance.get_profiles()\n if candidate_instance.profiles:\n self.instances.append(candidate_instance)\n # print(candidate_instance.uir+\": \"+str(candidate_instance.profiles)) #checkpoint\n\n if root_flag == 0: print(self.host+\" does not have IBM or WebSphere directory on disk D\")",
"def _sync_files(self, sys_type='generic'):\n sys_path = {'admin': '/root/local/admin/',\n 'proxy': '/root/local/proxy/',\n 'storage': '/root/local/storage/',\n 'generic': '/root/local/common/'}\n\n if sys_type == 'generic':\n sudo('rsync -aq0c --exclude=\".git\" --exclude=\".ignore\" %s /'\n % (sys_path[sys_type]))\n elif sys_type == 'saio':\n for x in ['generic', 'proxy', 'storage']:\n sudo('rsync -aq0c --exclude=\".git\" --exclude=\".ignore\" %s /'\n % (sys_path[x]))\n else:\n sudo('rsync -aq0c --exclude=\".git\" --exclude=\".ignore\" %s /'\n % (sys_path['generic']))\n sudo('rsync -aq0c --exclude=\".git\" --exclude=\".ignore\" %s /'\n % (sys_path[sys_type]))",
"def sync(self, vault_client, opt):\n active_mounts = []\n for audit_log in self.logs():\n audit_log.sync(vault_client)\n\n # Handle policies only on the first pass. This allows us\n # to ensure that ACL's are in place prior to actually\n # making any changes.\n not_policies = self.sync_policies(vault_client)\n # Handle auth wrapper resources on the next path. The resources\n # may update a path on their own. They may also provide mount\n # tuning information.\n not_auth = self.sync_auth(vault_client, not_policies)\n # Handle mounts only on the next pass. This allows us to\n # ensure that everything is in order prior to actually\n # provisioning secrets. Note we handle removals before\n # anything else, allowing us to address mount conflicts.\n active_mounts, not_mounts = self.sync_mounts(active_mounts,\n not_auth,\n vault_client)\n # Now handle everything else. If \"best practices\" are being\n # adhered to then every generic mountpoint should exist by now.\n # We handle \"child\" resources after the first batch\n sorted_resources = sorted(not_mounts, key=childless_first)\n for resource in sorted_resources:\n resource.sync(vault_client)\n\n for mount in self.mounts():\n if not find_backend(mount.path, active_mounts):\n mount.unmount(vault_client)\n\n if opt.remove_unknown:\n self.prune(vault_client)",
"def sync(self, vault_client):\n active_mounts = []\n for mount in self.mounts():\n if not mount.existing:\n mount.sync(vault_client)\n for auth in self.auths():\n if not auth.existing:\n auth.sync(vault_client)\n for blog in self.logs():\n if not blog.existing:\n blog.sync(vault_client)\n for resource in self.resources():\n if isinstance(resource, (Secret, Mount)) and resource.present:\n active_mount = find_backend(resource.mount, active_mounts)\n if not active_mount:\n actual_mount = find_backend(resource.mount, self._mounts)\n if actual_mount:\n active_mounts.append(actual_mount)\n\n resource.sync(vault_client)\n\n for mount in self.mounts():\n if not find_backend(mount.path, active_mounts):\n mount.unmount(vault_client)",
"def create_chain_instances(self):\n for section in self.config_reader.sections():\n self.read_chain(section)",
"def doSync (self) :\r\n \r\n self.factory.getSyncFor(self)",
"def init():\n\n globs.PROGDIR = os.getcwd() #Save the program directory\n \n\n # Prepare to Read from Property File\n\n globs.SYSTEM_USER_KEYS = ('System_Username', 'System_Password')\n globs.SCHEMA_CREDS_KEYS = [('WebWORKS_Username', 'WebWORKS_Password'), ('ABPP_Username', 'ABPP_Password'), ('Monitor_Username', 'Monitor_Password'), ('JDA_SYSTEM_Username', 'JDA_SYSTEM_Password'), ('SCPO_Username', 'SCPO_Password')]\n globs.CRED_DICT = {\n 'System_Username':'System_Password', \n 'WebWORKS_Username':'WebWORKS_Password',\n 'ABPP_Username':'ABPP_Password',\n 'Monitor_Username':'Monitor_Password',\n 'JDA_SYSTEM_Username':'JDA_SYSTEM_Password',\n 'SCPO_Username':'SCPO_Password'\n }\n\n # The following Queue(deque) will contain all Tasks \n globs.TQueue = deque()\n\n # Create an instance of TaskUpdate Signal and connect it\n globs.UpdateSignal = UpdateSignal()\n globs.UpdateSignal.updateTask.connect(updater)\n\n # Error Signal\n globs.SignalObj = CSignal()\n globs.SignalObj.updateErrorSignal.connect(dispErr)\n\n globs.LogPipe = LogPipe()\n globs.ABPP_CREATED = False\n createLogFolders()\n \n errorEvent = threading.Event()\n globs.ERREVENT = errorEvent",
"def create(self):\n\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n process_name = sample_info[\"process_name_specific\"]\n for charge_selection in self.charge_selections:\n key_dir = getKey(process_name, charge_selection)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_RLES ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.configDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name)\n for dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n initDict(self.dirs, [ dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n self.dirs[dir_type] = os.path.join(self.configDir, dir_type, self.channel)\n else:\n self.dirs[dir_type] = os.path.join(self.outputDir, dir_type, self.channel)\n ##print \"self.dirs = \", self.dirs\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n inputFileLists = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n logging.info(\"Checking input files for sample %s\" % sample_info[\"process_name_specific\"])\n inputFileLists[sample_name] = generateInputFileList(sample_name, sample_info, self.max_files_per_job, self.debug)\n \n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_analyze, process_name)) \n\n is_mc = (sample_info[\"type\"] == \"mc\")\n lumi_scale = 1. 
if not (self.use_lumi and is_mc) else sample_info[\"xsection\"] * self.lumi / sample_info[\"nof_events\"]\n apply_genWeight = sample_info[\"apply_genWeight\"] if (is_mc and \"apply_genWeight\" in sample_info.keys()) else False\n sample_category = sample_info[\"sample_category\"]\n triggers = sample_info[\"triggers\"]\n apply_trigger_bits = (is_mc and (self.era == \"2015\" or (self.era == \"2016\" and sample_info[\"reHLT\"]))) or not is_mc\n\n for charge_selection in self.charge_selections:\n for central_or_shift in self.central_or_shifts:\n\n inputFileList = inputFileLists[sample_name]\n for jobId in inputFileList.keys():\n if central_or_shift != \"central\" and not is_mc:\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttH\") and sample_category != \"signal\":\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttW\") and sample_category != \"TTW\":\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttZ\") and sample_category != \"TTZ\":\n continue\n\n # build config files for executing analysis code\n key_dir = getKey(process_name, charge_selection)\n key_analyze_job = getKey(process_name, charge_selection, central_or_shift, jobId)\n\n ntupleFiles = inputFileList[jobId]\n if len(ntupleFiles) == 0:\n print \"Warning: ntupleFiles['%s'] = %s --> skipping job !!\" % (key_job, ntupleFiles)\n continue\n self.jobOptions_analyze[key_analyze_job] = {\n 'ntupleFiles' : ntupleFiles,\n 'cfgFile_modified' : os.path.join(self.dirs[key_dir][DKEY_CFGS], \"analyze_%s_%s_%s_%s_%i_cfg.py\" % \\\n (self.channel, process_name, charge_selection, central_or_shift, jobId)),\n 'histogramFile' : os.path.join(self.dirs[key_dir][DKEY_HIST], \"%s_%s_%s_%i.root\" % \\\n (process_name, charge_selection, central_or_shift, jobId)),\n 'logFile' : os.path.join(self.dirs[key_dir][DKEY_LOGS], \"analyze_%s_%s_%s_%s_%i.log\" % \\\n (self.channel, process_name, charge_selection, central_or_shift, jobId)),\n 'sample_category' : sample_category,\n 'triggers' : sample_info[\"triggers\"],\n 'charge_selection' : charge_selection,\n 'jet_minPt' : self.jet_minPt,\n 'jet_maxPt' : self.jet_maxPt,\n 'jet_minAbsEta' : self.jet_minAbsEta,\n 'jet_maxAbsEta' : self.jet_maxAbsEta,\n 'hadTau_selection_denominator' : self.hadTau_selection_denominator,\n 'hadTau_selections_numerator' : self.hadTau_selections_numerator,\n 'absEtaBins' : self.absEtaBins,\n ##'use_HIP_mitigation_mediumMuonId' : sample_info[\"use_HIP_mitigation_mediumMuonId\"],\n 'use_HIP_mitigation_mediumMuonId' : True,\n 'is_mc' : is_mc,\n 'central_or_shift' : central_or_shift,\n 'lumi_scale' : 1. 
if not (self.use_lumi and is_mc) else sample_info[\"xsection\"] * self.lumi / sample_info[\"nof_events\"],\n 'apply_genWeight' : sample_info[\"genWeight\"] if (is_mc and \"genWeight\" in sample_info.keys()) else False,\n 'apply_trigger_bits' : (is_mc and (self.era == \"2015\" or (self.era == \"2016\" and sample_info[\"reHLT\"]))) or not is_mc,\n }\n self.createCfg_analyze(self.jobOptions_analyze[key_analyze_job])\n\n # initialize input and output file names for hadd_stage1\n key_hadd_stage1 = getKey(process_name, charge_selection)\n if not key_hadd_stage1 in self.inputFiles_hadd_stage1:\n self.inputFiles_hadd_stage1[key_hadd_stage1] = []\n self.inputFiles_hadd_stage1[key_hadd_stage1].append(self.jobOptions_analyze[key_analyze_job]['histogramFile'])\n self.outputFile_hadd_stage1[key_hadd_stage1] = os.path.join(self.dirs[DKEY_HIST], \"histograms_harvested_stage1_%s_%s_%s.root\" % \\\n (self.channel, process_name, charge_selection))\n\n # initialize input and output file names for hadd_stage2\n key_hadd_stage1 = getKey(process_name, charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n if not key_hadd_stage2 in self.inputFiles_hadd_stage2:\n self.inputFiles_hadd_stage2[key_hadd_stage2] = []\n self.inputFiles_hadd_stage2[key_hadd_stage2].append(self.outputFile_hadd_stage1[key_hadd_stage1])\n self.outputFile_hadd_stage2[key_hadd_stage2] = os.path.join(self.dirs[DKEY_HIST], \"histograms_harvested_stage2_%s_%s.root\" % \\\n (self.channel, charge_selection))\n\n logging.info(\"Creating configuration files for executing 'comp_jetToTauFakeRate'\")\n for charge_selection in self.charge_selections:\n key_comp_jetToTauFakeRate_job = getKey(charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job] = {\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"comp_jetToTauFakeRate_%s_cfg.py\" % charge_selection),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s.root\" % charge_selection),\n 'logFile' : os.path.join(\n self.dirs[DKEY_LOGS], \"comp_jetToTauFakeRate_%s.log\" % charge_selection),\n 'looseRegion' : \"jetToTauFakeRate_%s/denominator/\" % charge_selection,\n 'tightRegion' : \"jetToTauFakeRate_%s/numerator/\" % charge_selection,\n 'absEtaBins' : self.absEtaBins,\n 'ptBins' : self.ptBins\n }\n self.createCfg_comp_jetToTauFakeRate(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job])\n self.targets.append(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job]['outputFile'])\n\n logging.info(\"Creating configuration files to run 'makePlots'\")\n for charge_selection in self.charge_selections:\n key_makePlots_job = getKey(charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_cfg.py\" % self.channel),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s.png\" % self.channel),\n 'histogramDir' : \"jetToTauFakeRate_%s\" % charge_selection,\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n self.cfgFile_make_plots = self.cfgFile_make_plots_denominator\n for absEtaBin in [ \"absEtaLt1_5\", \"absEta1_5to9_9\" 
]:\n key_makePlots_job = getKey(charge_selection, absEtaBin, \"denominator\")\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_%s_denominator_%s_cfg.py\" % (self.channel, charge_selection, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s_%s_denominator_%s.png\" % (self.channel, charge_selection, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s/denominator/%s\" % (charge_selection, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for hadTau_selection_numerator in self.hadTau_selections_numerator:\n key_makePlots_job = getKey(charge_selection, absEtaBin, \"numerator\", hadTau_selection_numerator)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_%s_numerator_%s_%s_cfg.py\" % (self.channel, charge_selection, hadTau_selection_numerator, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s_%s_numerator_%s_%s.png\" % (self.channel, charge_selection, hadTau_selection_numerator, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s/numerator/%s/%s\" % (charge_selection, hadTau_selection_numerator, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_analyze)\n self.sbatchFile_analyze = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_analyze_%s.py\" % self.channel)\n self.createScript_sbatch_analyze(self.executable_analyze, self.sbatchFile_analyze, self.jobOptions_analyze)\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_comp_jetToTauFakeRate)\n self.sbatchFile_comp_jetToTauFakeRate = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_comp_jetToTauFakeRate.py\")\n self.createScript_sbatch(self.executable_comp_jetToTauFakeRate, self.sbatchFile_comp_jetToTauFakeRate, self.jobOptions_comp_jetToTauFakeRate)\n\n lines_makefile = []\n self.addToMakefile_analyze(lines_makefile)\n self.addToMakefile_hadd_stage1(lines_makefile)\n self.addToMakefile_hadd_stage2(lines_makefile)\n self.addToMakefile_comp_jetToTauFakeRate(lines_makefile)\n self.addToMakefile_make_plots(lines_makefile)\n self.createMakefile(lines_makefile)\n \n logging.info(\"Done\")",
"def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info['use_it']:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n if not is_mc:\n continue\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable, process_name))\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n key_dir = getKey(process_name)\n\n outputFile = os.path.join(\n self.dirs[key_dir][DKEY_HISTO], \"%s.root\" % process_name\n )\n self.outputFiles[process_name] = {\n 'inputFiles' : [],\n 'outputFile' : outputFile,\n }\n if os.path.isfile(outputFile) and tools_is_file_ok(outputFile, min_file_size = 2000):\n logging.info('File {} already exists --> skipping job'.format(outputFile))\n continue\n\n for jobId in inputFileList.keys():\n\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = inputFileList[jobId]\n if len(self.inputFiles[key_file]) == 0:\n logging.warning(\n \"'%s' = %s --> skipping job !!\" % (key_file, self.inputFiles[key_file])\n )\n continue\n\n self.cfgFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.txt\" % (process_name, jobId)\n )\n self.outputFiles_tmp[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_HISTO_TMP], \"histogram_%i.root\" % jobId\n )\n self.logFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"project_%s_%i.log\" % (process_name, jobId)\n )\n self.scriptFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.sh\" % (process_name, jobId)\n )\n projection_module = self.projection_module\n if projection_module == \"count\":\n projection_module = \"countHistogramAll\"\n if sample_name.startswith('/TTTo'):\n projection_module += \"CompTopRwgt\"\n elif sample_info['sample_category'].startswith('ttH'):\n projection_module += \"CompHTXS\"\n elif isSplitByNlheJet(process_name):\n projection_module += \"SplitByLHENjet\"\n elif isSplitByNlheHT(process_name):\n projection_module += \"SplitByLHEHT\"\n elif isSplitByNlheJetHT(process_name, sample_name):\n projection_module += \"SplitByLHENjetHT\"\n self.jobOptions_sbatch[key_file] = {\n 'histName' : process_name,\n 'inputFiles' : self.inputFiles[key_file],\n 'cfgFile_path' : self.cfgFiles_projection[key_file],\n 'outputFile' : self.outputFiles_tmp[key_file],\n 'logFile' : self.logFiles_projection[key_file],\n 'scriptFile' : self.scriptFiles_projection[key_file],\n 'projection_module' : projection_module,\n }\n if self.projection_module != 'puHist':\n self.jobOptions_sbatch[key_file]['ref_genWeight'] = self.ref_genWeights[process_name]\n if process_name not in self.ref_genWeights:\n raise RuntimeError(\"Unable to find reference LHE weight for process %s\" % process_name)\n self.createCfg_project(self.jobOptions_sbatch[key_file])\n self.outputFiles[process_name]['inputFiles'].append(self.outputFiles_tmp[key_file])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable)\n self.num_jobs['project'] += self.createScript_sbatch(\n self.executable, self.sbatchFile_projection, self.jobOptions_sbatch\n )\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n 
self.addToMakefile_project(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n if self.plot:\n self.addToMakefile_plot(lines_makefile)\n self.addToMakefile_finalHadd(lines_makefile)\n self.createMakefile(lines_makefile)\n logging.info(\"Done\")\n\n return self.num_jobs",
"def _setup_shared_list(self):\n self._manager = Manager().__enter__()\n self._shared_list = self._manager.list()\n return self",
"def test_sync_3(self):\n dir0, dir1, dir2 = self.make_temp_dirs(3)\n self.write_file(dir0, \"foo\", \"bar\")\n self.sync_all()\n\n self.assertFile(dir0, \"foo\", \"bar\")\n self.assertFile(dir1, \"foo\", \"bar\")\n self.assertFile(dir2, \"foo\", \"bar\")",
"def open(self):\n self.close() \n self.readDataFromFile() \n self.resetWriteCount()\n \n taskMgr.remove('%s-syncTask'%(self.className,))\n t = taskMgr.add(self.syncTask,'%s-syncTask'%(self.className,))\n t.timeElapsed = 0.0",
"def test_sync_2_then_3(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\", \"bar\")\n self.sync_all()\n self.assertFile(dir0, \"foo\", \"bar\")\n self.assertFile(dir1, \"foo\", \"bar\")\n\n dir2 = self.make_temp_dir()\n self.sync_all()\n self.assertFile(dir0, \"foo\", \"bar\")\n self.assertFile(dir1, \"foo\", \"bar\")\n self.assertFile(dir2, \"foo\", \"bar\")",
"def __fake_data__(self):\n\n # Set directory for configuration files\n self.configFilePath = q.system.fs.joinPaths(q.dirs.varDir, 'tftproot')\n \n # Add some share's\n for i in xrange(3):\n share = NFSShare()\n share.name = 'share-%s' % q.base.idgenerator.generateRandomInt(0, 255)\n self.shares[share.name] = share",
"async def sync_pairs(self):\n\n self.watch_only_pairs = []\n\n await self._handle_trader_watch_pairs()\n await self._handle_balancer_watch_pairs()\n\n for pair in self.market.pairs + self.market.extra_base_pairs:\n await self.prepare_trades(pair)\n await self.prepare_states(pair)\n await self.prepare_last_trades(pair)\n\n await self.prepare_all_trade_stats()\n await self.balancer.sync_pairs()",
"def _sync_seeds(self, mode: str, src: str, dest: str, excludes: List[str] = []) -> None:\n\n if not mode in [\"GET\", \"PUSH\"]:\n raise FuzzFrontendError(f\"Unknown mode for seed syncing: `{mode}`\")\n\n rsync_cmd: List[str] = [\"rsync\", \"-racz\", \"--ignore-existing\"]\n\n # subclass should invoke with list of pattern ignores\n if len(excludes) > 0:\n rsync_cmd += [f\"--exclude={e}\" for e in excludes]\n\n if mode == \"GET\":\n rsync_cmd += [dest, src]\n elif mode == \"PUSH\":\n rsync_cmd += [src, dest]\n\n L.debug(\"rsync command: %s\", rsync_cmd)\n try:\n subprocess.Popen(rsync_cmd)\n except subprocess.CalledProcessError as e:\n raise FuzzFrontendError(f\"{self.name} run interrupted due to exception {e}.\")",
"def syncfolder():",
"def register_gridfs_files(self, **kwargs):\n d = {}\n for k, v in kwargs.items():\n mode = \"b\"\n if isinstance(v, (list, tuple)): v, mode = v\n d[k] = GridFsFile(path=v, mode=mode)\n\n self[\"files\"].update(d)\n return self",
"def __init__(self, conf_files):\n\n APIManager.__max_size_csv()\n\n self.all_conf = OrderedDict()\n self.base_url = []\n for conf_file in conf_files:\n conf = OrderedDict()\n tp = None\n conf_json = HashFormatHandler().read(conf_file)\n base_url = None\n addon = None\n for item in conf_json:\n if base_url is None:\n base_url = item[\"url\"]\n self.base_url.append(item[\"url\"])\n website = item[\"base\"]\n tp = item[\"endpoint\"]\n if \"addon\" in item:\n addon_abspath = abspath(\n dirname(conf_file) + sep + item[\"addon\"]\n )\n path.append(dirname(addon_abspath))\n addon = import_module(basename(addon_abspath))\n sparql_http_method = \"post\"\n if \"method\" in item:\n sparql_http_method = item[\"method\"].strip().lower()\n else:\n conf[APIManager.nor_api_url(item, base_url)] = item\n\n self.all_conf[base_url] = {\n \"conf\": conf,\n \"tp\": tp,\n \"conf_json\": conf_json,\n \"base_url\": base_url,\n \"website\": website,\n \"addon\": addon,\n \"sparql_http_method\": sparql_http_method,\n }",
"def _init_files_dirs(self):\n self.local.create_files_dirs()\n self.remote.create_files_dirs()",
"def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n # read the file in, sample-by-sample\n # build the dictionary recursively\n # add rle file also to generated cfg files\n # print integrations per job as well!\n # consider more than 1 file per jobs -- the jobs are splitted by MEM integration anyways\n\n rle_filters = self.get_filter() if self.rle_filter_file else {}\n statistics = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n\n if not os.path.exists(sample_info['local_paths'][0]['path']):\n logging.warning(\"Skipping sample {sample_name}\".format(sample_name = sample_name))\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_addMEM, process_name))\n is_mc = (sample_info[\"type\"] == \"mc\")\n if self.rle_filter_file:\n assert(process_name in rle_filters)\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n # typically, the analysis ends here and starts looping b/c the smallest unit of work processes\n # at least one file; we need, however, to split the file into event ranges in such a way that\n # each job performs mem_integrations_per_job MEM integrations\n\n # so what we are going to do is to open each set of files in inputFileList, read the variable\n # requestMEM_*l_*tau and try to gather the event ranges such that each event range\n # performs up to mem_integrations_per_job integrations per job\n memEvtRangeDict = self.memJobList(inputFileList, rle_filters[process_name] if self.rle_filter_file else [])\n\n for jobId in memEvtRangeDict.keys():\n\n key_dir = getKey(sample_name)\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = memEvtRangeDict[jobId]['input_fileset']\n\n # there should always be a job\n assert(self.inputFiles[key_file] > 0), \"More than one input file: %s ?? 
!!\" % \\\n ', '.join(self.inputFiles[key_file])\n\n #assert(len(self.inputFiles[key_file]) == 1), \"There is more than one input file!\"\n self.cfgFiles_addMEM_modified[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"addMEM_%s_%s_%i_cfg.py\" % (self.channel, process_name, jobId)\n )\n self.shFiles_addMEM_modified[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"addMEM_%s_%s_%i.sh\" % (self.channel, process_name, jobId)\n )\n self.outputFiles[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_NTUPLES], \"%s_%i.root\" % (process_name, jobId)\n )\n self.logFiles_addMEM[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"addMEM_%s_%s_%i.log\" % (self.channel, process_name, jobId)\n )\n self.logFiles_addMEM[key_file] = get_log_version((self.logFiles_addMEM[key_file],))[0]\n self.createCfg_addMEM(\n self.inputFiles[key_file],\n memEvtRangeDict[jobId]['event_range'][0],\n memEvtRangeDict[jobId]['event_range'][1],\n self.outputFiles[key_file],\n self.era,\n sample_info[\"sample_category\"],\n is_mc,\n self.cfgFiles_addMEM_modified[key_file],\n memEvtRangeDict[jobId]['whitelist'],\n )\n\n # associate the output file with the fileset_id\n #UDPATE: ONE OUTPUT FILE PER SAMPLE!\n fileset_id = memEvtRangeDict[jobId]['fileset_id']\n hadd_output_dir = os.path.join(\n self.dirs[key_dir][DKEY_FINAL_NTUPLES],\n '%04d' % (fileset_id // 1000)\n )\n if not os.path.exists(hadd_output_dir):\n os.makedirs(hadd_output_dir)\n hadd_output = os.path.join(\n hadd_output_dir, '%s_%i.root' % ('tree', fileset_id) # UDPATE: ADDED\n #hadd_output_dir, \"tree.root\" # UDPATE: REMOVED\n )\n if hadd_output not in self.hadd_records:\n self.hadd_records[hadd_output] = {}\n self.hadd_records[hadd_output]['output_files'] = []\n self.hadd_records[hadd_output]['fileset_id'] = fileset_id\n self.hadd_records[hadd_output]['output_files'].append(self.outputFiles[key_file])\n self.hadd_records[hadd_output]['process_name'] = process_name\n\n # let's sum the number of integration per sample\n nofEntriesMap = {}\n for v in memEvtRangeDict.values():\n if v['fileset_id'] not in nofEntriesMap:\n nofEntriesMap[v['fileset_id']] = {\n 'nof_entries' : v['nof_entries'],\n }\n statistics[process_name] = {\n 'nof_int' : sum([entry['nof_int'] for entry in memEvtRangeDict.values()]),\n 'nof_entries' : sum([entry['nof_entries'] for entry in nofEntriesMap.values()]),\n 'nof_events_pass' : sum([entry['nof_events_pass'] for entry in memEvtRangeDict.values()]),\n 'nof_int_pass' : sum([entry['nof_int_pass'] for entry in memEvtRangeDict.values()]),\n 'nof_zero' : sum([entry['nof_zero'] for entry in memEvtRangeDict.values()]),\n 'nof_jobs' : len(memEvtRangeDict),\n }\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_addMEM)\n self.createScript_sbatch()\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_addMEM(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n self.createMakefile(lines_makefile)\n\n ws_len = max([len(kk) + 1 for kk in statistics.keys()])\n total_nof_integrations_sum = sum(x['nof_int'] for x in statistics.values())\n total_nof_entires = sum(x['nof_entries'] for x in statistics.values())\n total_nof_zero_int = sum(x['nof_zero'] for x in statistics.values())\n total_nof_jobs = sum(x['nof_jobs'] for x in statistics.values())\n total_nof_pass = sum(x['nof_events_pass'] for x in statistics.values())\n total_nof_int_pass_avg = float(sum(x['nof_int_pass'] for x in statistics.values())) / total_nof_pass\n 
total_nof_integrations_avg = float(total_nof_integrations_sum) / total_nof_entires\n total_nof_int_per_job = float(total_nof_integrations_sum) / total_nof_jobs\n for k, v in statistics.iteritems():\n if v['nof_entries'] == 0:\n int_per_event = 0.\n evt_pass = 0.\n else:\n int_per_event = float(v['nof_int']) / v['nof_entries']\n evt_pass = (100 * float(v['nof_events_pass']) / v['nof_entries'])\n if v['nof_events_pass'] == 0:\n nof_int_pass = 0.\n else:\n nof_int_pass = float(v['nof_int_pass']) / v['nof_events_pass']\n print('%s%s: %d (%d entries; %d jobs; %.2f int/evt; %d (%.2f%%) evt pass; %.2f int/evt pass; %d evt 0int)' %\n (k,\n ' ' * (ws_len - len(k)),\n v['nof_int'],\n v['nof_entries'],\n v['nof_jobs'],\n int_per_event,\n v['nof_events_pass'],\n evt_pass,\n nof_int_pass,\n v['nof_zero'],\n )\n )\n print('%s%s: %d (%d entries; %d jobs; %.2f int/evt; %d evt pass; %.2f int/evt pass; '\n '%.2f int/job pass; %d evt 0int)' %\n ('total',\n ' ' * (ws_len - len('total')),\n total_nof_integrations_sum,\n total_nof_entires,\n total_nof_jobs,\n total_nof_integrations_avg,\n total_nof_pass,\n total_nof_int_pass_avg,\n total_nof_int_per_job,\n total_nof_zero_int,\n )\n )\n\n if self.max_mem_integrations > 0 and total_nof_integrations_sum > self.max_mem_integrations:\n logging.error(\"Will not start the jobs (max nof integrations exceeded)!\")\n return False\n else:\n logging.info(\"Done\")\n return True",
"def _save_sync_list(self):\n\t\tfp = open(self.sync_file, 'w')\n\t\tself.sync_list.write(fp)\n\t\tfp.close()",
"def instances(self):\n for d in os.listdir(self.directory):\n yield self.instance(self.directory, d)"
] | [
"0.6529905",
"0.6203876",
"0.6001982",
"0.5916084",
"0.56833124",
"0.5660356",
"0.56500673",
"0.55826265",
"0.55749094",
"0.5474997",
"0.546527",
"0.5450697",
"0.53788894",
"0.53397506",
"0.5328996",
"0.530742",
"0.52719736",
"0.5265014",
"0.52415264",
"0.52197003",
"0.52047336",
"0.5190559",
"0.5189377",
"0.5182658",
"0.5171932",
"0.5150166",
"0.5143609",
"0.51259637",
"0.5111977",
"0.51029915"
] | 0.6603992 | 0 |
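Several of the negative snippets above (for example, create_sync_instance) decide whether to kill and restart a sync instance by comparing a SHA-256 hash of the requested configuration against the hash stored with the running instance's data. A minimal, hedged sketch of that check is shown below; the helper names and the shape of the stored record are illustrative, not taken verbatim from any one snippet.

```python
import hashlib


def config_hash(instance_name, dirs_to_sync, global_options):
    """Hash the pieces of configuration whose change should force a restart."""
    payload = str(instance_name) + str(dirs_to_sync) + str(global_options)
    return hashlib.sha256(payload.encode("utf-8")).hexdigest()


def needs_restart(stored_instance, instance_name, dirs_to_sync, global_options):
    """Return True if there is no stored instance or its config hash no longer matches."""
    if stored_instance is None:
        return True  # nothing recorded: a new instance must be started
    current = config_hash(instance_name, dirs_to_sync, global_options)
    return stored_instance.get("config_hash") != current
```

A matching hash means the instance can be left running; a mismatch (or a missing record) triggers a kill-and-restart, which is exactly the branch structure of the snippet.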
Kill unison instance by its PID. Includes built-in protection against accidentally killing a non-unison program, and even other unison programs not started with this script. This ensures that this function will never kill a PID that we have not started with unisonctrl. Parameters: int pid to kill, must be a PID started in this process. Throws: none. Returns: none. Doctests: none | def kill_sync_instance_by_pid(self, pid):
# Get the list of known pids to ensure we only kill one of those
running_data = self.data_storage.running_data
self.logger.debug(
"Attempting to kill PID '" + str(pid) + "'"
)
known_pids = []
# Gets PIDs of all the known unison processes
known_pids = [int(running_data[d]['pid']) for d in running_data]
# TODO: Rewrite this function, it can probably be done with reduce()
# RESOLUTION: Rewritten above, this kept in case it doesn't work
# for entry in running_data:
# running_data[entry]
# known_pids.append(int(running_data[entry]['pid']))
        # TODO: Finish this error-checking logic; currently it doesn't check the PID
# Try and kill with sigint (same as ctrl+c), if we are allowed to
# First make sure the process exists
if not psutil.pid_exists(pid):
self.logger.info(
"PID " + str(pid) + " was not found. Perhaps already dead?"
)
return
# Then make sure it's a process we started
elif pid not in known_pids:
shortmsg = (
"PID #" + str(pid) + " is not managed by UnisonCTRL. " +
"Refusing to kill. See logs for more information."
)
longmsg = (
"PID #" + str(pid) + " is not managed by UnisonCTRL. " +
"Refusing to kill. Your data files are likely corrupted. " +
"Kill all running unison instances on this system, " +
"delete everything in '" + self.config['running_data_dir'] +
"/*', and run UnisonCTRL again."
)
self.logger.critical(longmsg)
raise RuntimeError(shortmsg)
# Finally, kill the process if it exists and we started it
else:
return self.kill_pid(pid) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def kill(pid):\n p = psutil.Process(pid)\n\n try:\n p.kill()\n except Exception:\n pass",
"def kill_pid(pid):\n try:\n # Unable to import 'module'\n # pylint: disable=no-member,F0401\n import signal\n return os.kill(pid, signal.SIGTERM)\n except ImportError:\n pass",
"def kill(pid):\n # If the process doesn't exist, it raises an exception that we can ignore.\n try:\n os.kill(pid, signal.SIGKILL)\n except OSError:\n pass",
"def processKill(uPid):\n return processTerminate(uPid);",
"def _KillProcess(self, pid):\n if sys.platform.startswith('win'):\n process_terminate = 1\n handle = ctypes.windll.kernel32.OpenProcess(\n process_terminate, False, pid)\n ctypes.windll.kernel32.TerminateProcess(handle, -1)\n ctypes.windll.kernel32.CloseHandle(handle)\n\n else:\n try:\n os.kill(pid, signal.SIGKILL)\n except OSError as exception:\n logger.error('Unable to kill process {0:d} with error: {1!s}'.format(\n pid, exception))",
"def kill_process(self,PID):\n os.system(\"sudo kill {}\".format(PID))\n return True",
"def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False",
"def kill(proc_pid: int) -> None:\n\n if not psutil.pid_exists(proc_pid):\n return\n\n process = psutil.Process(proc_pid)\n\n for proc in process.children(recursive=True):\n proc.kill()\n\n process.kill()",
"def kill_child(self, pid):\n # try communicate\n try:\n self._child_pids[pid].communicate()\n except Exception:\n print('Could not communicate to child')\n try:\n self.execute_command(\"kill -9 \"+str(pid))\n except Exception as e:\n print(e)",
"def kill_pid(self, pid):\n # Ensure it still exists before continuing\n if not psutil.pid_exists(pid):\n return\n\n # If it did not die nicely, get stronger about killing it\n p = psutil.Process(pid)\n\n # Try terminating, wait 3 seconds to see if it dies\n p.terminate() # SIGTERM\n psutil.wait_procs([p], timeout=3)\n\n # Ensure it still exists before continuing\n if not psutil.pid_exists(pid):\n self.logger.debug(\n \"PID \" + str(pid) + \" was killed with SIGTERM successfully.\"\n )\n return\n\n # Try hard killing, wait 3 seconds to see if it dies\n p.kill() # SIGKILL\n psutil.wait_procs([p], timeout=3)\n\n self.logger.info(\n \"PID \" + str(pid) + \" could not be killed with SIGTERM, and \" +\n \"was killed with SIGKILL.\"\n )\n\n return",
"def _sudoKillSubprocessFromPid(self, pid, log = True, force = False, timeout = 1):\n\n if self._logger is not None:\n self._logger.info('Sending SIGINT to slave PID ' + str(pid))\n args = ['sudo', 'kill', '-SIGINT', str(pid)] # Send Ctrl+C to slave DHCP client process\n subprocess.call(args, stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT)\n \n if force:\n while self._checkPid(pid): # Loop if slave process is still running\n time.sleep(0.1)\n timeout -= 0.1\n if timeout <= 0: # We have reached timeout... send a SIGKILL to the slave process to force termination\n if self._logger is not None:\n self._logger.info('Sending SIGKILL to slave PID ' + str(pid))\n args = ['sudo', 'kill', '-SIGKILL', str(pid)] # Send Ctrl+C to slave DHCP client process\n subprocess.call(args, stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT)\n break",
"def killPid(pid, force=False):\n if pid < 0:\n return False\n\n try:\n os.kill(pid, signal.SIGKILL if force else signal.SIGTERM)\n except OSError:\n return False\n else:\n return True",
"def kill_process_by_pid(duthost, container_name, program_name, program_pid):\n kill_cmd_result = duthost.shell(\"docker exec {} kill -SIGKILL {}\".format(container_name, program_pid))\n\n # Get the exit code of 'kill' command\n exit_code = kill_cmd_result[\"rc\"]\n pytest_assert(exit_code == 0, \"Failed to stop program '{}' before test\".format(program_name))\n\n logger.info(\"Program '{}' in container '{}' was stopped successfully\"\n .format(program_name, container_name))",
"def safe_kill(pid):\n try:\n return os.kill(pid, signal.SIGKILL)\n except OSError as e:\n if e.errno == errno.ESRCH:\n # Raced with process termination\n pass\n else:\n raise",
"def kill(self, pid, returncode):\r\n kernel32 = ctypes.windll.kernel32\r\n handle = kernel32.OpenProcess(1, 1, pid)\r\n ret = kernel32.TerminateProcess(handle, returncode)\r\n kernel32.CloseHandle(handle)\r\n return (0 != ret)",
"def kill_process(pid, exit_code=None):\n\n if exit_code is None:\n exit_code = DEFAULT_TERMINATION_EXIT_CODE\n\n try:\n handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, pid)\n except pywintypes.error:\n return False # \"The parameter is incorrect.\"\n\n if not handle:\n return False\n\n try:\n win32api.TerminateProcess(handle, exit_code)\n return True\n except pywintypes.error:\n return False # \"Access is denied.\"\n finally:\n win32api.CloseHandle(handle)",
"def kill(pid, sig=signal.SIGTERM.value):\n pid = int(pid)\n sig = int(sig)\n proc = psutil.Process(pid)\n try:\n proc.send_signal(sig)\n return True\n except Exception as e:\n raise j.exceptions.RuntimeError(\"Could not kill process with id %s.\\n%s\" % (pid, e))",
"def _TerminateProcessByPid(self, pid):\n self._RaiseIfNotRegistered(pid)\n\n process = self._processes_per_pid[pid]\n\n self._TerminateProcess(process)\n self._StopMonitoringProcess(process)",
"def kill_process(process):\n \n if process == None:\n print(\"No process to kill.\")\n pass\n else:\n os.killpg(os.getpgid(process.pid), signal.SIGTERM)\n process = None\n print(\"Process killed.\")\n return None",
"def check_pid(pid):\n try:\n os.kill(pid, 0)\n except OSError as ex:\n template = \"An exception of type {0} occured.\\nArguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print (message)\n return False\n else:\n return True",
"def Kill(cls, pid, children=False):\n\t\tif pid is not None:\n\t\t\tif children:\n\t\t\t\tfor cpid, _, cmd in cls.Children(pid):\n\t\t\t\t\t# We need to recursively kill the childrens\n\t\t\t\t\tcls.Kill(cpid, children=True)\n\t\t\tLogger.Info(\"Killing process: \" + repr(pid))\n\t\t\treturn popen(\"kill -9 %s\" % (pid))\n\t\telse:\n\t\t\treturn None",
"def try_kill_process(proc):\n pid = proc.pid\n LOG.info(\"Killing process %s\" % pid)\n try:\n os.kill(pid, signal.SIGKILL)\n except Exception:\n LOG.exception(\"Failed to kill %s\" % pid)",
"def kill_friend(pid, delay=0):\n sleep(delay)\n try:\n os.kill(pid, SIGKILL)\n except (PermissionError, ProcessLookupError) as e:\n if psutil.pid_exists(pid):\n util.debug(\"Fail to kill an alive process?!?\")\n raise e\n util.debug(\"process {} was already dead\".format(pid))",
"def stop(self, kill=False):\n if not self._process:\n raise JubaTestFixtureFailedError('this instance has not been started yet')\n\n try:\n if kill:\n log.debug('KILLing process')\n self._process.kill()\n else:\n log.debug('terminating process')\n self._process.terminate()\n except OSError as e:\n if e.errno != errno.ESRCH: # \"No such process\"\n raise e\n # may be a race between poll and signal; just ignore\n log.debug('race between poll and signal detected')\n finally:\n (self.stdout, self.stderr) = self._process.communicate()\n self._process = None",
"def kill(self, id):",
"def kill(self, id):",
"def killIfRequested(self):\n pidFilePath = self.options.get(RunnerOptions.pidFilePath)\n\n if self.options.get(RunnerOptions.kill, False):\n if pidFilePath is None:\n exit(ExitStatus.EX_USAGE, \"No PID file specified\")\n return # When testing, patched exit doesn't exit\n else:\n pid = \"\"\n try:\n for pid in pidFilePath.open():\n break\n except EnvironmentError:\n exit(ExitStatus.EX_IOERR, \"Unable to read PID file.\")\n return # When testing, patched exit doesn't exit\n try:\n pid = int(pid)\n except ValueError:\n exit(ExitStatus.EX_DATAERR, \"Invalid PID file.\")\n return # When testing, patched exit doesn't exit\n\n self.startLogging()\n self.log.info(\"Terminating process: {pid}\", pid=pid)\n\n kill(pid, SIGTERM)\n\n exit(ExitStatus.EX_OK)\n return # When testing, patched exit doesn't exit",
"def _kill_process(self, box_config):\n try:\n self.logger.info(f'kill: {box_config.process_name} {{')\n self.logger.info(f'target process pid={box_config.pid}')\n if box_config.pid and psutil.pid_exists(box_config.pid):\n p = psutil.Process(box_config.pid)\n p.kill()\n p.wait()\n box_config.pid = None\n self.bc_dao.update(box_config)\n remove_pid_file(box_config.process_name)\n except Exception:\n self.logger.error(f'Exception on killing: {box_config.process_name}', exc_info=True)\n finally:\n self.logger.info('}')",
"def pkill(process_name):\n try:\n killed = os.system('taskkill /im ' + process_name)\n except Exception:\n killed = 0\n return killed",
"def kill_nicely(pid, is_our_child):\n\n is_dead = is_our_child and is_really_dead or is_probably_dead \n\n def kill(sig):\n try:\n os.kill(pid, sig)\n except OSError, exc:\n log.error(str(exc))\n raise SystemExit(1)\n\n kill(signal.SIGTERM)\n if is_dead(pid):\n return True\n\n log.error(\"%d still going; resending SIGTERM\" % pid)\n kill(signal.SIGTERM)\n if is_dead(pid):\n return True\n\n log.critical(\"%d STILL going; sending SIGKILL\" % pid)\n kill(signal.SIGKILL)\n if not is_dead(pid):\n msg = \"%d STILL GOING FIVE SECONDS AFTER SIGKILL; I give up.\" % pid\n log.critical(msg)\n return False"
] | [
"0.7319361",
"0.7115674",
"0.7110478",
"0.70291764",
"0.6886039",
"0.6852084",
"0.67194027",
"0.67028004",
"0.6644425",
"0.6624735",
"0.6603372",
"0.6584298",
"0.65174264",
"0.64720327",
"0.6353765",
"0.6335386",
"0.63055",
"0.6287493",
"0.6258707",
"0.6181982",
"0.61499745",
"0.60681427",
"0.59906036",
"0.5942084",
"0.59415597",
"0.59415597",
"0.5940627",
"0.59358376",
"0.59357995",
"0.59277034"
] | 0.72457254 | 1 |
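The record above combines two safeguards that also appear in several of its negatives (kill_pid, safe_kill): never signal a PID the controller did not start itself, and try SIGTERM before escalating to SIGKILL. A condensed sketch of that pattern using psutil follows; the function name and the managed_pids bookkeeping are assumptions for illustration, not part of the dataset.

```python
import psutil


def kill_managed_pid(pid, managed_pids):
    """Terminate a process only if it is one this controller started."""
    if not psutil.pid_exists(pid):
        return  # already gone, nothing to do
    if pid not in managed_pids:
        raise RuntimeError(
            "PID %d is not managed by this controller; refusing to kill" % pid
        )
    proc = psutil.Process(pid)
    proc.terminate()  # polite SIGTERM first
    _, alive = psutil.wait_procs([proc], timeout=3)
    for p in alive:
        p.kill()  # escalate to SIGKILL only if SIGTERM was ignored
```

Keeping the SIGTERM/SIGKILL escalation behind the managed-PID check mirrors the document's refusal to touch any process that is not in its own data store.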
Ensure all expected processes are still running. Checks the running_data list against the current PID list to ensure all expected processes are still running. Note that if everything works as expected and does not crash, there should never be dead instances. As such, if dead instances appear on a regular basis, consider digging into why they are appearing. | def cleanup_dead_processes(self):
# Get the list of processes we know are running and we think are running
# Also, convert each PID to int to make sure we can compare
actually_running_processes = self.get_running_unison_processes()
l = self.data_storage.running_data
supposedly_running_processes = [int(l[d]['pid']) for d in l]
# Find which instances we think are running but aren't
dead_instances = [x for x in supposedly_running_processes if x not in actually_running_processes]
# Note: if nothing crashes, dead instances should never exist.
if(len(dead_instances) > 0):
self.logger.warn(
"Found " + str(len(dead_instances)) + " unexpected dead " +
"instances. Cleaning up data files now."
)
else:
self.logger.debug(
"Found " + str(len(dead_instances)) + " unexpected dead " +
"instances to clean up."
)
# Remove data on dead instances
for instance_id in dead_instances:
process = self.get_process_info_by_pid(instance_id)
self.logger.debug(
"Removing data on '" + str(process['syncname']) + "' " +
"because it is not running as expected."
)
self.data_storage.remove_data(process['syncname']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def detect_instance_pids_still_alive(self):\n missing_instances = []\n running_pids = psutil.pids()\n for instance in self.all_instances:\n if instance.pid not in running_pids:\n missing_instances.append(instance)\n\n if len(missing_instances) > 0:\n logging.error(\n \"Not all instances are alive. The following are not running: %s\",\n str(missing_instances),\n )\n logging.error(get_process_tree())\n raise Exception(\"instances missing: \" + str(missing_instances))\n instances_table = get_instances_table(self.get_instance_essentials())\n logging.info(\"All arangod instances still running: \\n%s\", str(instances_table))\n attach_table(instances_table, \"Instances table\")",
"def check_running(self):\n remove = []\n\n # iterate over all \"running\" processes\n for proc in self.processes:\n # if the process has stopped\n if proc['proc'].poll() is not None:\n if proc['type'] == 'rtmpdump':\n self.logger.info(\n proc['model'] + \" is no longer being captured\")\n if os.path.isfile(proc['filename']):\n proc_stats = self.get_proc_stats(proc)\n if proc_stats['file_size'] == 0:\n self.logger.warning(\"Capture size is 0kb, deleting.\")\n os.remove(proc['filename'])\n else:\n self.move_to_complete(proc)\n message = (\"Finished:\" +\n proc['model'] + \" - \" +\n \"Started at \" +\n proc_stats['started_at'] + \" | \" +\n \"Size:\" +\n proc_stats['formatted_file_size'] + \" | \" +\n \"Duration:\" +\n proc_stats['recording_time'])\n self.logger.info(message)\n if self.push_bullet is not None:\n self.push_bullet.push_note(\"Chaturbate\", message)\n elif proc['type'] == 'ffmpeg':\n if proc['proc'].poll() == 0:\n os.remove(proc['source'])\n else:\n self.logger.warning(\"Something went wrong with ffmpeg, not deleting\")\n\n remove.append(proc['id'])\n\n # remove all items in remove from self.processes\n procs = self.processes\n for item in remove:\n procs = [f for f in procs if f['id'] != item]\n self.processes = procs",
"def _kill_running_processes(self):\n # Kill any rouge processes that are still running.\n with _thread_lock:\n killed = []\n for pid in self._pids:\n try:\n os.kill(pid, _KILLED_BY_ANYPYTOOLS)\n killed.append(str(pid))\n except:\n pass\n self._pids.clear()",
"def monitor(self):\n for idx, process in enumerate(self.__process_list):\n process.id_number = idx + 1\n while len(self.__process_list) > 0:\n for process in list(self.__process_list):\n if not process.has_output():\n _return_code = process.return_code\n self.__process_list.remove(process)\n if _return_code == 0:\n logger.info(\"Finished process #{}: there are now {}/{} running\".format(process.id_number, len(self.__process_list), self.__n_initial))\n else:\n logger.warning(\"Process #{} terminated unexpectedly (return code {}): there are now {}/{} running\".format(process.id_number, _return_code, len(self.__process_list), self.__n_initial))",
"def test_all_workers_are_active_processes(self):\n WORKERS_COUNT = 10\n\n # Testing only ProcessPool since only it has the mechanism that waits for all workers to come online before\n # start finishes\n pool = ProcessPool(WORKERS_COUNT)\n\n pool.start(WorkerIdGeneratingWorker)\n for _ in range(100):\n pool.ventilate()\n\n active_worker_ids = [pool.get_results() for _ in range(100)]\n self.assertEqual(set(range(WORKERS_COUNT)), set(active_worker_ids))\n\n pool.stop()\n pool.join()",
"def check_processes(process_list):\n running = 1 # 0 when the subprocesses are all done\n while running:\n for proc in process_list:\n proc.poll()\n if proc.returncode == 1:\n raise RuntimeError(\"Program \" +\n \"number \" +\n \"{}\".format(process_list.index(proc)) +\n \" failed.\")\n running = bool(sum([int(proc.returncode) for proc in process_list]))\n return True",
"def check_parent_processes_alive():\n cur_process = psutil.Process()\n parent = cur_process.parent()\n while True:\n time.sleep(1)\n if not parent.is_running():\n break\n\n logger.warning(\"Parent process is terminated abnormally. Process exits.\")\n cur_process.kill()",
"def refresh_pids(active_pids, resources):\n still_active_pids = []\n no_change = True\n for info in active_pids:\n pid, gpu, title, cmd, lock_path = info\n if still_active(pid, cmd):\n still_active_pids.append(info)\n else:\n print(f\"[{time.strftime(time.ctime())}] {title} seems to be over.\")\n os.remove(lock_path)\n resources.free(gpu=gpu)\n no_change = False\n return still_active_pids, no_change",
"def check_launcher():\n\n # Storage in memory which holds info about currently running checks\n storage = {}\n\n # Storage in memory which holds process info: process id and project objects\n processes = {}\n\n # Close previously opened connections (if the exist)\n django.db.connections.close_all()\n\n while True:\n # Making Copy in order to compare updates in data base\n new_storage = copy.deepcopy(storage)\n\n # Fetch data from database\n check_sync(new_storage)\n\n # Get storage keys in order to compare storages for changes\n old_keys = set(storage.keys())\n new_keys = set(new_storage.keys())\n\n # Get keys of elements in init storage and updated storage\n added_checks = new_keys.difference(old_keys)\n deleted_checks = old_keys.difference(new_keys)\n common_checks = new_keys.intersection(old_keys)\n\n # Launch new processes\n for check_id in added_checks:\n # Spawn new process with name Process#id, where id = check_id\n start_process(check_id, new_storage, processes)\n\n # Stop (kill) deleted check's prorcesses\n for check_id in deleted_checks:\n stop_process(check_id, storage, processes)\n\n for check_id in common_checks:\n if storage[check_id] != new_storage[check_id]:\n stop_process(check_id, storage, processes)\n # Spawn new process with name Process#id, where id = check_id\n start_process(check_id, new_storage, processes)\n\n storage = copy.deepcopy(new_storage)\n time.sleep(30)",
"def ensure_all_critical_processes_running(duthost, containers_in_namespaces):\n for container_name in containers_in_namespaces.keys():\n critical_group_list, critical_process_list, succeeded = duthost.get_critical_group_and_process_lists(container_name)\n pytest_assert(succeeded, \"Failed to get critical group and process lists of container '{}'\".format(container_name))\n\n namespace_ids = containers_in_namespaces[container_name]\n for namespace_id in namespace_ids:\n container_name_in_namespace = container_name\n if namespace_id != DEFAULT_ASIC_ID:\n container_name_in_namespace += namespace_id\n\n for critical_process in critical_process_list:\n # Skip 'dsserve' process since it was not managed by supervisord\n # TODO: Should remove the following two lines once the issue was solved in the image.\n if container_name_in_namespace == \"syncd\" and critical_process == \"dsserve\":\n continue\n\n ensure_process_is_running(duthost, container_name_in_namespace, critical_process)\n\n for critical_group in critical_group_list:\n group_program_info = get_group_program_info(duthost, container_name_in_namespace, critical_group)\n for program_name in group_program_info:\n ensure_process_is_running(duthost, container_name_in_namespace, program_name)",
"def basic_overcloud_processes_running(self):\n for attempt_number in range(600):\n\n try:\n\n for process_name in self.processes_to_check:\n # osp16/python3 process is \"neutron-server:\"\n if process_name == 'neutron-server' and \\\n self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n process_name = 'neutron-server:'\n # osp17 mysqld process name is mysqld_safe\n if process_name == 'mysqld' and \\\n self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n process_name = 'mysqld_safe'\n # redis not deployed on osp17 by default, only if some\n # other services such as designate and octavia are deployed\n if (process_name == 'redis-server' and\n not overcloud.is_redis_expected()):\n redis_message = (\"redis-server not expected on OSP 17 \"\n \"and later releases by default\")\n if self.oc_procs_df.query(\n f'PROCESS==\"{process_name}\"').empty:\n LOG.info(redis_message)\n continue\n else:\n raise OvercloudProcessesException(\n process_error=redis_message)\n\n if not self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n LOG.info(\"overcloud processes status checks: \"\n \"process {} is \"\n \"in running state\".format(process_name))\n continue\n else:\n LOG.info(\"Failure : overcloud processes status checks:\"\n \"process {} is not running \".format(\n process_name))\n raise OvercloudProcessesException(\n process_error=\"process {} is not running \".format(\n process_name))\n # if all procs are running we can return true\n return True\n except OvercloudProcessesException:\n LOG.info('Retrying overcloud processes checks attempt '\n '{} of 360'.format(attempt_number))\n time.sleep(1)\n self.oc_procs_df = overcloud.get_overcloud_nodes_dataframe(\n get_overcloud_node_processes_table)\n # exhausted all retries\n tobiko.fail('Not all overcloud processes are running !\\n')",
"def check_subprocesses(self) : \n for sp_ident in self.active_subprocesses :\n if not os.path.exists(\"%s/%s.rc\" % (self.spool_dir, sp_ident ) ) : continue\n self.finished_subprocesses[sp_ident] = self.get_subprocess_result(sp_ident)\n self.active_subprocesses.pop(sp_ident)",
"def test_workers_die_when_main_process_dies(self):\n manager = Manager()\n return_list = manager.list()\n\n def run_process_pool(return_list):\n pool = ProcessPool(1)\n pool.start(WorkerIdGeneratingWorker)\n return_list.append(pool._workers[0].pid)\n # We dont call pool.stop() and hence leave workers alive\n\n process = Process(target=run_process_pool, args=(return_list,))\n process.start()\n process.join()\n # The worker has now started\n\n worker_pid = return_list[0]\n\n for _ in range(20):\n worker_is_alive = pid_exists(worker_pid)\n if not worker_is_alive:\n break\n time.sleep(0.1)\n self.assertFalse(worker_is_alive)",
"def _wait_for_all_operations_done(self):\n while self._test_names_to_processes:\n time.sleep(10)\n running_test_names = list(self._test_names_to_processes.keys())\n for test_name in running_test_names:\n running_proc = self._test_names_to_processes.get(test_name)\n return_code = running_proc.poll()\n if return_code is not None:\n test_case_state = self._test_names_to_test_states.get(test_name)\n self._handle_failure(running_proc, test_case_state.running_test)\n del self._test_names_to_processes[test_name]\n print('Started validating: {}'.format(test_name))\n test_case_state.running_test.validate_result()\n self._run_test(test_case_state.remaining_tests)",
"def detect_instance_pids(self):\n for instance in self.all_instances:\n instance.detect_pid(\n ppid=self.instance.pid,\n full_binary_path=self.cfg.real_sbin_dir,\n offset=0,\n )\n\n self.show_all_instances()\n self.detect_arangosh_instances(self.cfg, self.cfg.version)",
"def check_all_critical_processes_running(duthost):\n processes_status = duthost.all_critical_process_status()\n for container_name, processes in processes_status.items():\n if processes[\"status\"] is False or len(processes[\"exited_critical_process\"]) > 0:\n return False\n\n return True",
"def get_running_unison_processes(self):\n # Get PIDs\n # Note: throws exception if no instances exist\n try:\n pids = str(subprocess.check_output([\"pidof\", '/usr/bin/unison']))\n\n # Parse command output into list by removing junk chars and exploding\n # string with space delimiter\n pids = pids[2:-3].split(' ')\n\n except subprocess.CalledProcessError:\n # If error caught here, no unison instances are found running\n pids = []\n\n self.logger.debug(\n \"Found \" + str(len(pids)) + \" running instances on this system: PIDs \" +\n \", \".join(pids)\n )\n\n # Return, after converting to ints\n return list(map(int, pids))",
"def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)",
"def wait_for_exec_to_start():\n node_instances = self.client.node_instances.list()\n for ni in node_instances:\n # this will keyerror out (and be retried) if the operation\n # didn't run yet\n pids[ni.node_id] = ni.runtime_properties['pid']",
"def _select_processes(self):\n\n # check if at least one process is running\n is_running = False\n for pid in self.__pids:\n if ProcessMonitor.__is_running(pid):\n is_running = True\n break # at least one process is running\n\n if is_running:\n if not self.__aggregate_multiple_processes:\n return self.__pids\n\n # aggregate metrics, check the last discovered time\n if (\n self.__last_discovered\n and time.time() * 1000 - self.__last_discovered\n < self.__process_discovery_interval * 1000\n ):\n return self.__pids\n\n ps = ProcessList()\n if self.__commandline_matcher:\n self.__last_discovered = time.time() * 1000\n if self.__include_child_processes:\n matched_processes = ps.get_matches_commandline_with_children(\n self.__commandline_matcher\n )\n else:\n matched_processes = ps.get_matches_commandline(\n self.__commandline_matcher\n )\n self.__pids = matched_processes\n\n if not self.__aggregate_multiple_processes and len(self.__pids) > 1:\n # old behaviour where multiple processes were not supported for aggregation\n self._logger.warning(\n \"Multiple processes match the command '%s'. Returning existing pid. \"\n \"You can turn on the multi process aggregation support by adding the \"\n \"aggregate_multiple_processes configuration to true\"\n % self.__commandline_matcher,\n limit_once_per_x_secs=300,\n limit_key=\"linux-process-monitor-existing-pid\",\n )\n self.__pids = [self.__pids[0]]\n else:\n # See if the specified target pid is running. If so, then return it.\n # Special cases:\n # '$$' mean this process.\n # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later.\n pids = []\n if self.__target_pids:\n for t_pid in self.__target_pids:\n if t_pid == \"$$\":\n t_pid = int(os.getpid())\n\n # skip this until it will be replaced with a real PID.\n elif t_pid == \"$$TBD\":\n continue\n else:\n t_pid = int(t_pid)\n pids.append(t_pid)\n self.__pids = pids\n return self.__pids",
"def setChildPIDs(self):\n\t\tpids = []\n\t\tfor proc in process_iter():\n\t\t\tfor child in self.expectedChildren:\n\t\t\t\tif child == proc.name():\n\t\t\t\t\tif proc.parent().name() == \"Python\": # Hardcoded string comparison. Sue me.\n\t\t\t\t\t\tpids.append(proc.pid)\n\t\tself.pids = pids",
"def kill_processes(self) -> None:\n for process in [p for p in self.processes if p.is_running()]:\n for child in process.children(recursive=True):\n if child.is_running():\n child.kill()\n\n process.kill()",
"def _StopMonitoringProcesses(self):\n # We need to make a copy of the list of pids since we are changing\n # the dict in the loop.\n for pid in list(self._process_information_per_pid.keys()):\n self._RaiseIfNotRegistered(pid)\n process = self._processes_per_pid[pid]\n\n self._StopMonitoringProcess(process)",
"def run_manager(self) -> None:\n \n for p in self.process_list:\n try:\n p.daemon = True\n p.start()\n except:\n self.process_list.remove(p)\n p = Process(target=self.multiprocessing_job, args=(self.process_job,))\n p.daemon = True\n self.process_list.append(p)\n p.start()\n #Every 1 seconds, check for active Processes.\n while True:\n sleep(1)\n running = any(p.is_alive() for p in self.process_list)\n if not running or not active_children:\n self.restart_required = True\n break\n self.logger.info(self.name + \" has finished managing.\")",
"def ovn_overcloud_processes_validations(self):\n if not neutron.has_ovn():\n LOG.info(\"Networking OVN not configured\")\n return True\n\n for process_dict in self.ovn_processes_to_check_per_node:\n if not self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_dict['name'])).empty:\n LOG.info(\"overcloud processes status checks: \"\n f\"process {process_dict['name']} is \"\n \"in running state\")\n\n ovn_proc_filtered_df = self.oc_procs_df.query(\n 'PROCESS==\"{}\"'.format(process_dict['name']))\n\n if (process_dict['node_group'] not in\n topology.list_openstack_node_groups()):\n LOG.debug(f\"{process_dict['node_group']} is not \"\n \"a node group part of this Openstack cloud\")\n continue\n node_list = [node.name\n for node in\n topology.list_openstack_nodes(\n group=process_dict['node_group'])]\n node_names_re = re.compile(r'|'.join(node_list))\n node_filter = (ovn_proc_filtered_df.overcloud_node.\n str.match(node_names_re))\n # obtain the processes running on a specific type of nodes\n ovn_proc_filtered_per_node_df = \\\n ovn_proc_filtered_df[node_filter]\n if type(process_dict['number']) == int:\n assert process_dict['number'] == \\\n len(ovn_proc_filtered_per_node_df), (\n \"Unexpected number\"\n f\" of processes {process_dict['name']} running on \"\n f\"{process_dict['node_group']} nodes\")\n elif process_dict['number'] == 'all':\n num_nodes = len(node_list)\n assert num_nodes == len(ovn_proc_filtered_per_node_df), (\n \"Unexpected number of processes \"\n f\"{process_dict['name']} running on \"\n f\"{process_dict['node_group']} nodes\")\n else:\n raise RuntimeError(\"Unexpected value:\"\n f\"{process_dict['node_group']}\")\n # process successfully validated\n LOG.debug(f\"{process_dict['name']} successfully validated on \"\n f\"{process_dict['node_group']} nodes\")\n\n # if all procs are running we can return true\n return True",
"def finalize(self):\n for p in self._processes:\n if p.join(30) is None and p.exitcode is None:\n p.kill()",
"def wait_all_process_done(self) -> None:\n while len(self.process_queue) > 0:\n self.check_process_done()",
"def test_pods_are_not_oomkilled_while_running_ios(self, base_setup):\n pod_objs = base_setup\n\n for pod in pod_objs:\n pod_name = pod.get().get(\"metadata\").get(\"name\")\n if \"debug\" in pod_name:\n log.info(f\"Skipping {pod_name} pod from validation\")\n continue\n restart_count = (\n pod.get().get(\"status\").get(\"containerStatuses\")[0].get(\"restartCount\")\n )\n for item in pod.get().get(\"status\").get(\"containerStatuses\"):\n # Validate pod is oomkilled\n container_name = item.get(\"name\")\n assert validate_pod_oomkilled(\n pod_name=pod_name, container=container_name\n ), f\"Pod {pod_name} OOMKILLED while running IOs\"\n\n # Validate pod is running and not restarted\n assert validate_pods_are_running_and_not_restarted(\n pod_name=pod_name,\n pod_restart_count=restart_count,\n namespace=config.ENV_DATA[\"cluster_namespace\"],\n ), f\"Pod {pod_name} is either not running or restarted while running IOs\"\n\n # Check ceph health is OK\n ceph_health_check()",
"def check_process_full(self) -> None:\n if len(self.process_queue) >= self.max_processes:\n task_name, sp = self.process_queue.pop()\n sp.wait()",
"def race_condition():\n if len(allocated_pids) != len(set(allocated_pids)):\n return True\n else:\n return False"
] | [
"0.7389043",
"0.6897777",
"0.6816135",
"0.6575307",
"0.6531362",
"0.6480457",
"0.6444165",
"0.6391992",
"0.63755715",
"0.6345061",
"0.6262",
"0.6207695",
"0.6162055",
"0.6090997",
"0.60754627",
"0.59734434",
"0.594996",
"0.5924127",
"0.5866743",
"0.58547074",
"0.5805897",
"0.5795071",
"0.5766161",
"0.5758804",
"0.57293856",
"0.56779015",
"0.567779",
"0.56496423",
"0.56469756",
"0.56372637"
] | 0.74760336 | 0 |
Return PIDs of currently running unison instances. | def get_running_unison_processes(self):
# Get PIDs
# Note: throws exception if no instances exist
try:
pids = str(subprocess.check_output(["pidof", '/usr/bin/unison']))
# Parse command output into list by removing junk chars and exploding
# string with space delimiter
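# (str() on the bytes output yields "b'<pids>\n'", so the [2:-3] slice below strips the b' prefix and \n' suffix)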
pids = pids[2:-3].split(' ')
except subprocess.CalledProcessError:
# If error caught here, no unison instances are found running
pids = []
self.logger.debug(
"Found " + str(len(pids)) + " running instances on this system: PIDs " +
", ".join(pids)
)
# Return, after converting to ints
return list(map(int, pids)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()",
"def get_pid_list():\r\n pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]\r\n return pids",
"def detect_instance_pids(self):\n for instance in self.all_instances:\n instance.detect_pid(\n ppid=self.instance.pid,\n full_binary_path=self.cfg.real_sbin_dir,\n offset=0,\n )\n\n self.show_all_instances()\n self.detect_arangosh_instances(self.cfg, self.cfg.version)",
"def getActiveProcesses():\n active = []\n\n for p in PROCESSRUNNER_PROCESSES:\n if p.is_alive():\n active.append(p)\n\n return active",
"def running_procs(self) -> List[int]:\n return [p.model_id for p in self.primary_scheduler.queue_nodes.run_q]",
"def pids(node, java_class):\n cmd = \"ps -C java -wwo pid,args | grep '%s' | awk -F' ' '{print $1}'\" % java_class\n\n return [int(pid) for pid in node.account.ssh_capture(cmd, allow_fail=True)]",
"def get_instances_ids(self):\n reservations = self.__get_reservations()\n instances_ids = []\n instances,_ = self.__get_multi_instances(reservations)\n for instance in instances:\n instances_ids.append(instance.id.encode(\"latin-1\"))\n return instances_ids",
"def pids(self):\n return self._pidToProcess.iterkeys()",
"def pid_processes(self):\n return [(process.namespec(), process.infos[self.address_name]['pid'])\n for process in self.processes.values()\n if process.pid_running_on(self.address_name)]",
"def pids(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s/pids\" % (\n self.sessionid, self.name))\n result = self.server.json_body(resp)\n return result['pids']",
"def get_running_pris(self):\n try:\n running_pris_list = []\n output = self.ssh.exec_command(self.check_running_kombu_dialer_command)\n for line in output[1].readlines():\n line = line.split()\n if self.server in line and \"-g\" in line:\n running_pris_list.append(\n int(\n line[line.index(\"-g\")+1][2:]\n )\n )\n return running_pris_list\n except Exception as err:\n self.error_logger.error(err.message + \" PRITester::get_running_pris\")\n return None",
"def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)",
"def pids(self):\r\n return copy(self._pids)",
"def PIDs():\n from ctypes import windll,c_ulong,byref,sizeof\n PIDs = (c_ulong*512)()\n size_of_PIDs = c_ulong()\n windll.psapi.EnumProcesses(byref(PIDs),sizeof(PIDs),byref(size_of_PIDs))\n nPIDs = size_of_PIDs.value/sizeof(c_ulong())\n pidProcess = sorted([int(i) for i in PIDs][:nPIDs])\n return pidProcess",
"def get_running():\n ps = which('/usr/bin/ps') # avoid the old BSD variant\n lines = sh(ps, '-e', '-f', quiet=True)\n # The first line of the `ps' output is a header line which is\n # used to find the data field columns.\n column = lines[0].index('CMD')\n procs = set()\n for line in lines[1:]:\n cmd_line = line[column:]\n command = cmd_line.split()[0]\n procs.add(os.path.basename(command))\n return procs",
"def _get_instance_ids(instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids",
"def list_instance_uuids(self):\n return self.list_instances()",
"def get_all_current_processes():\n p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out",
"def processor_ids(self):\n return self._processor_ids",
"def get_running_processes(self):\n\n all_processes = []\n for _process in self.processes:\n all_processes.append(_process[\"pid\"])\n return all_processes",
"def pids(self, node):\n try:\n cmd = \"ps ax | grep -i 'redpanda\\|node' | grep -v grep | awk '{print $1}'\"\n pid_arr = [\n pid for pid in node.account.ssh_capture(\n cmd, allow_fail=True, callback=int)\n ]\n return pid_arr\n except (RemoteCommandError, ValueError):\n return []",
"def running_processes(self):\n return [process for process in self.processes.values()\n if process.running_on(self.address_name)]",
"def detect_instance_pids_still_alive(self):\n missing_instances = []\n running_pids = psutil.pids()\n for instance in self.all_instances:\n if instance.pid not in running_pids:\n missing_instances.append(instance)\n\n if len(missing_instances) > 0:\n logging.error(\n \"Not all instances are alive. The following are not running: %s\",\n str(missing_instances),\n )\n logging.error(get_process_tree())\n raise Exception(\"instances missing: \" + str(missing_instances))\n instances_table = get_instances_table(self.get_instance_essentials())\n logging.info(\"All arangod instances still running: \\n%s\", str(instances_table))\n attach_table(instances_table, \"Instances table\")",
"def get_vid_pid_list(self):\n\n return self.vid_pid_s",
"def get_pid_of_all_workers(containers):\n res = []\n for i in containers:\n if \"mongo\" not in i.name and (\"slave\" in i.name or \"master\" in i.name):\n print(i.name, file=sys.stdout)\n pid = i.attrs[\"State\"][\"Pid\"]\n res.append(pid)\n return res",
"def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})",
"def get_pids(name=None):\n results = []\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):\n if name is None or process.Properties_(\"Name\").Value == name:\n results.append(process.Properties_(\"ProcessID\").Value)\n return results",
"def ps(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n print(vmrun.listProcessesInGuest())",
"def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n ['/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append(\n (int(m.group(1)), m.group(3).rstrip(), int(m.group(2)), m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs",
"def get_user_processes(user):\n result = []\n for process in psutil.process_iter():\n if process.username() == user:\n result.append(process.pid)\n return result"
] | [
"0.6817835",
"0.66776186",
"0.66677475",
"0.652992",
"0.64818245",
"0.64350146",
"0.64334714",
"0.6432905",
"0.6400463",
"0.6384319",
"0.63838774",
"0.63823915",
"0.6377238",
"0.6373727",
"0.63699615",
"0.6306814",
"0.6305249",
"0.63029253",
"0.6282789",
"0.6215774",
"0.6207281",
"0.6164416",
"0.6133893",
"0.6077494",
"0.6062262",
"0.60307604",
"0.6006469",
"0.5978291",
"0.5974449",
"0.5964022"
] | 0.86521405 | 0 |
Import config from config.py, and apply defaults where needed. | def import_config(self):
# Get the config file
import config
# Get all keys from key-value pairs in the config file
settingsFromConfigFile = [x for x in dir(config) if not x.startswith('__')]
# Convert config file into dict
for key in settingsFromConfigFile:
value = getattr(config, key)
self.config[key] = value
# Settings validation: specify keys which are valid settings
# If there are rows in the config file which are not listed here, an
# error will be raised
validSettings = {
'data_dir',
'running_data_dir',
'unison_log_dir',
'unisonctrl_log_dir',
'log_file',
'make_root_directories_if_not_found',
'sync_hierarchy_rules',
'unison_local_root',
'unison_remote_root',
'unison_path',
'global_unison_config_options',
'unison_remote_ssh_conn',
'unison_remote_ssh_keyfile',
'unison_local_hostname',
'unison_home_dir',
'unison_user',
'webhooks',
'rotate_logs',
}
# If a setting contains a directory path, add its key here and it will
# be sanitized (whitespace and trailing whitespace stripped)
settingPathsToSanitize = {
'data_dir',
'unison_home_dir',
'running_data_dir',
'unison_log_dir',
'unisonctrl_log_dir',
}
# Values here are used as config values unless overridden in the
# config.py file
defaultSettings = {
'data_dir': '/tmp/unisonctrl',
'log_file': '/dev/null',
'make_root_directories_if_not_found': True,
'unison_path': '/usr/bin/unison', # Default ubuntu path for unison
'unison_remote_ssh_keyfile': "",
'unison_local_hostname': platform.node(),
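# Note: the derived defaults below read self.config['data_dir'] while this dict is built,
# so config.py must define data_dir for them; the '/tmp/unisonctrl' fallback above is not applied yet.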
'running_data_dir': self.config['data_dir'] + os.sep + "running-sync-instance-information",
'unison_log_dir': self.config['data_dir'] + os.sep + "unison-logs",
'unisonctrl_log_dir': self.config['data_dir'] + os.sep + "unisonctrl-logs",
'unison_user': getpass.getuser(),
'rotate_logs': "time",
}
# TODO: Implement allowedSettings, which force settings to be
# in a given list of options
# Apply default settings to fill gaps between explicitly set ones
for key in defaultSettings:
if (key not in self.config):
self.config[key] = defaultSettings[key]
# Ensure all required keys are specified
for key in validSettings:
if (key not in self.config):
raise LookupError("Required config entry '" + key + "' not specified")
# Ensure no additional keys are specified
for key in self.config:
if (key not in validSettings):
raise LookupError("Unknown config entry: '" + key + "'")
# Sanitize directory paths
for key in settingPathsToSanitize:
self.config[key] = self.sanatize_path(self.config[key])
# If you reach here, configuration was read and imported without error
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup(self, config):\n config_location = None\n try:\n try:\n stream = config.read()\n if hasattr(config, 'name'):\n config_location = config.name\n except (AttributeError, TypeError):\n f = file(config)\n stream = f.read()\n config_location = f.name\n except (AttributeError, TypeError):\n stream = config\n\n try:\n config = yaml_load(stream, Loader=yaml_loader)\n config = CaseInsensitiveDictMapper(config)\n\n if config_location:\n self._config.app_path = os.path.abspath(config_location)\n\n elif not 'config' in config or \\\n not 'app_path' in config['config']:\n raise ConfigError('app_path could not be calculated and is not set in config')\n\n except YAMLError as e:\n error = 'Import failed with malformed config'\n if hasattr(e, 'problem_mark'):\n mark = e.problem_mark\n error += ' at: (%s:%s)' % (mark.line+1, mark.column+1)\n raise ConfigError(error)\n\n if not self._validate_imported_config(config):\n raise ConfigError('Import failed: config invalid')\n\n if 'config' in config:\n self._update_config_from_import(config['config'])\n\n if 'filters' in config:\n self._update_global_filters_from_import(config['filters'])\n\n if 'global filters' in config:\n self._update_global_filters_from_import(config['global filters'])\n\n if 'routes' in config:\n self._update_routes_from_import(config['routes'])\n\n log.debug(self._router)\n\n return True",
"def load_config(self):\n pass",
"def setup_config(self, args=None):\n self.config_parse(args=args)",
"def read_config(self, config):\n try:\n newconfig = ConfigObj(config, interpolation=False,\n configspec=self._configspec)\n except ConfigObjError as e:\n raise ConfigError(e)\n newconfig = self._validate(newconfig)\n self._config.merge(newconfig)\n logger.info(\"Loaded additional config: {0}\".format(config))",
"def initialize_from_config(self):",
"def update_from_file(self):\n config_path = os.environ.get('MINDINSIGHT_CONFIG', '')\n if not config_path:\n return\n\n config_module = None\n\n # python:full.path.for.config.module\n if config_path.startswith('python:'):\n config_module = import_module(config_path[len('python:'):])\n\n # file:full/path/for/config.py\n elif config_path.startswith('file:'):\n config_path = config_path[len('file:'):]\n module_name = '__mindinsightconfig__'\n config_module = types.ModuleType(module_name)\n machinery = import_module('importlib.machinery')\n loader = machinery.SourceFileLoader(module_name, config_path)\n loader.exec_module(config_module)\n\n if config_module is None:\n return\n\n for setting in dir(config_module):\n if setting.isupper() and setting in self._default_settings:\n setting_value = getattr(config_module, setting)\n setattr(self, setting, setting_value)\n self._explicit_settings.add(setting)",
"def setup_config():\n global config\n config = modConfig.Config(cmdline.config)",
"def setup(self):\n file_under_test = os.path.join(os.curdir, 'application-core',\n 'app.core.config.xml')\n with open(file_under_test) as f:\n config = f.read()\n self.config = objectify.fromstring(config)",
"def use_config_file(self):\n self.config_file = self.find_config_file()\n if self.config_file:\n self.apply_config_file(self.config_file)",
"def parse_config(self):\n # TODO: parse config file\n pass",
"def load_config(self):\n if os.path.exists(self.config_file):\n with open(self.config_file) as f:\n conf = json.load(f)\n\n self.update_attributes_from_config(conf)",
"def read_config(self, config_filename):",
"def _load_common_config(self, config: Dict[str, Any]) -> Dict[str, Any] :\n # Log level\n if 'loglevel' in self.args.loglevel:\n config.update({'verbosity': self.args.loglevel})\n else:\n config.update({'verbosity': 0})\n logging.basicConfig(\n level=logging.INFO if config['verbosity'] < 1 else logging.DEBUG,\n format= '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n set_loggers(config['verbosity'])\n logger.info('Verbosity set to %s', config['verbosity'])\n\n # Add dynamic whitelist if found\n if 'dynamic_whitelist' in self.args and self.args.dynamic_whitelist:\n config['pairlist'] = {\n 'method': 'VolumePairList',\n 'config': {'number_assets': self.args.dynamic_whitelist}\n }\n logger.warning(\n 'Parameter --dynamic-whitelist has been deprecated, '\n 'and will be completely replaced by the whitelist dict in the future. '\n 'For now: using dynamically generated whitelist based on VolumePairList. '\n '(not applicable with Backtesting and Hyperopt)'\n )\n if self.args.db_url and self.args.db_url != constant.DEFAULT_DB_PROD_URL:\n config.update({'db_url': self.args.db_url})\n logger.info('Parameter --db-url detected ...')\n\n if config.get('dry_run', False):\n logger.info('Dry run is enabled')\n if config.get('db_url') in [None, constant.DEFAULT_DB_PROD_URL]:\n # Default to in-memory db for dry_run if not specified\n config['db_url'] = constant.DEFAULT_DB_DRYRUN_URL\n else:\n if not config.get('db_url', None):\n config['db_url'] = constant.DEFAULT_DB_PROD_URL\n logger.info('Dry run is disabled')\n\n if config.get('forcebuy_enable', False):\n logger.warning('`forcebuy` RPC message enabled.')\n\n # Setting max_open_trades to infinite if -1\n if config.get('max_open_trades') == -1:\n config['max_open_trades'] = float('inf')\n\n logger.info(f'Using DB: \"{config[\"db_url\"]}\"')\n\n # Check if the exchange set by the user is supported\n self.check_exchange(config)\n\n return config",
"def load_config():\n\t\ttry:\n\t\t\tconf = ConfigParser()\n\n\t\t\tconfig_path = get_config_path()\n\t\t\tconf.read(config_path)\n\n\t\t\t# save references to conf, and config_path in class variables\n\t\t\tConfig.config_path = config_path\n\t\t\tConfig.conf = conf\n\n\t\t\tConfig.source_dir = conf.get('paths', 'source_dir')\n\t\t\tConfig.lyrics_dir = conf.get('paths', 'lyrics_dir')\n\n\t\t\tConfig.save_to_file = conf.getboolean('actions', 'save_to_file')\n\t\t\tConfig.save_to_tag = conf.getboolean('actions', 'save_to_tag')\n\n\t\t\tConfig.overwrite = conf.getboolean('actions', 'overwrite')\n\n\t\t\t# Load all the sources\n\t\t\tConfig.lyric_wikia = conf.getboolean('sources', 'lyric_wikia')\n\t\t\tConfig.musix_match = conf.getboolean('sources', 'musix_match')\n\t\t\tConfig.lyricsmode = conf.getboolean('sources', 'lyricsmode')\n\t\t\tConfig.az_lyrics = conf.getboolean('sources', 'az_lyrics')\n\n\t\t\t# Loading this with user config, we need to call the load_config only once at start.\n\t\t\tConfig.lyric_files_in_dir = glob2.glob(os.path.join(Config.lyrics_dir, '**/*.txt'))\n\n\n\t\t# Catch file handling errors\n\t\texcept IOError as e:\n\t\t\tprint('Unable to load config.')\n\t\t\tprint(e)",
"def _load_config(self, args: argparse.Namespace):\n #\n # Load a config, filename may or may-not be provided...\n #\n try:\n self._config = TortugaScriptConfig.load(args.config)\n\n except ConfigException as ex:\n print(str(ex))\n sys.exit(0)\n\n #\n # Override the config with any provided argument values\n #\n if args.url:\n self._config.url = args.url\n if args.username:\n self._config.username = args.username\n if args.password:\n self._config.password = args.password\n if args.token:\n self._config.token = args.token\n self._config.verify = args.verify",
"def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))",
"def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))",
"def _parse_config(self, config=None):\r\n # TODO: Load user configuration from the file\r\n # self._current_user_name = get_from_conf(\r\n # config, \"user_name\", self._current_user_name\r\n # )\r\n pass",
"def populate_config(self, config):\n self.use_wine_mappings.set_active(config['use_wine_mappings'])\n self.force_recheck.set_active(config['force_recheck'])\n self._previous_force_recheck = config['force_recheck']\n self.resume.set_active(config['resume'])\n try:\n self.glade.get_widget('time_added_checkbox').set_active(\n 'time_added' in config['transfer_meta'])\n except KeyError:\n pass\n self.resume_dat_entry.set_text(config['previous_resume_dat_path'])",
"def _setConfig(self,config):\n if config:\n self.config = config\n else:\n from layman import config\n self.config = config",
"def load_from_config(self, **config: Any) -> None:\n for key, filename in config.items():\n self.load(filename, key)",
"def parse_config():\n config_path = Path(\"config.ini\")\n if config_path.exists():\n config.read(config_path)\n else:\n config[\"database\"] = {\"location\": \"image-database.db\"}\n config[\"images\"] = {\"extensions\": \".jpeg,.jpg,.png,.gif,.tiff\"}\n with open(config_path, \"w\") as configfile:\n config.write(configfile)\n config.read(config_path)",
"def load_from_conf(self):\r\n raise NotImplementedError",
"def from_config(config: dict):\n pass",
"def config():\n config_django()\n config_svisor()",
"def config():",
"def config():",
"def _configure(self):\n path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'config.yml'\n )\n\n with open(path) as file:\n defaultconfig = yaml.load(file)\n\n self.config = merge_dict(self.config, defaultconfig)\n\n if 'logging' in self.config:\n logging.config.dictConfig(self.config['logging'])\n else:\n logging.getLogger('sirbot').setLevel('INFO')",
"def load_config(cls, config_file = None):\n config = ConfigParser()\n \n files = [\"/etc/imp.cfg\", os.path.expanduser(\"~/.imp.cfg\"), \".wm\", \".imp\"]\n if config_file is not None:\n files.append(config_file)\n \n config.read(files)\n cls.__instance = config",
"def set_configuration(configuration):\n global _config\n _config = importlib.import_module(\"%s.%s\" % (_CONFIG_DIR, configuration))\n print \"loaded\", _config"
] | [
"0.7061655",
"0.6931682",
"0.67424726",
"0.6520522",
"0.65100807",
"0.64880663",
"0.64506406",
"0.6448089",
"0.6418599",
"0.6414208",
"0.63963264",
"0.6380314",
"0.63017356",
"0.62915653",
"0.62915236",
"0.6271026",
"0.6271026",
"0.6242148",
"0.62274957",
"0.6216356",
"0.6194805",
"0.6182691",
"0.617578",
"0.61728865",
"0.6157569",
"0.61558276",
"0.61558276",
"0.61540693",
"0.6153319",
"0.6146066"
] | 0.7400688 | 0 |
Send the predict request to the backend server, get the return value and do the post-processing. Predict the input image, and get the result. User must specify the image_path, servable_name, dataset_name and output_strategy to get the predict result. | def predict(self, img_path, servable_name, dataset_name="mnist", strategy="TOP1_CLASS"):
# Check if args are valid
if not os.path.isfile(img_path):
print("The image path {} not exist!".format(img_path))
sys.exit(0)
trans_func = transform_checker.get(dataset_name)
if trans_func is None:
print("Currently dataset_name only supports {}!".format(list(transform_checker.keys())))
sys.exit(0)
if strategy not in ("TOP1_CLASS", "TOP5_CLASS", "gray2color", "color2gray"):
print("Currently strategy only supports `TOP1_CLASS`, `TOP5_CLASS`, `gray2color` and`color2gray`!")
sys.exit(0)
# Perform the transform operation for the input image
if servable_name == 'cyclegan_cityscape':
img = np.array(load_resized_img(img_path))
else:
img = Image.open(img_path)
img_data = trans_func(img)
if self._server_started() is not True:
print('Server not started at host %s, port %d' % (self.host, self.port))
sys.exit(0)
else:
# Construct the request payload
payload = {
'instance': {
'shape': list(img_data.shape),
'dtype': img_data.dtype.name,
'data': json.dumps(img_data.tolist())
},
'strategy': strategy
}
headers = {'Content-Type': 'application/json'}
url = 'http://'+self.host+':'+str(self.port)+'/servables/' + servable_name
res = requests.post(url=url, headers=headers, data=json.dumps(payload))
res.content.decode("utf-8")
res_body = res.json()
if res.status_code != requests.codes.ok:
print("Request error! Status code: ", res.status_code)
sys.exit(0)
elif res_body['status'] != 0:
print(res_body['err_msg'])
sys.exit(0)
else:
instance = res_body['instance']
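# The server returns the tensor as a JSON-encoded nested list; rebuild the numpy array before post-processing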
res_data = np.array(json.loads(instance['data']))
if dataset_name == 'voc':
iw, ih = img.size
data = trans_func.postprocess(res_data, (ih, iw), strategy)
elif dataset_name == 'cityscape':
data = res_data
else:
data = trans_func.postprocess(res_data, strategy)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def predict():\r\n \r\n data = {\"success\": False}\r\n if flask.request.files.get(\"image\"):\r\n # read image from request\r\n image = flask.request.files[\"image\"].read()\r\n # convert image to BGR\r\n image = read_image_bgr(io.BytesIO(image))\r\n # preprocess image for model\r\n image = preprocess_image(image, mode='pass')\r\n image, scale = resize_image(image)\r\n data[\"scale\"] = scale\r\n\r\n # process image\r\n with graph.as_default():\r\n start_time = time.time()\r\n # generate prediction bounding boxes, scores, and labels on the input image\r\n boxes, scores, labels = model.predict(np.expand_dims(image, axis=0))\r\n # add inference time to data dictionary\r\n data[\"time\"] = time.time() - start_time\r\n\r\n # add prediction boxes, scores, & labels to data dictionary\r\n data[\"predictions\"] = {\"boxes\": boxes.tolist(),\r\n \"scores\": scores.tolist(),\r\n \"labels\": labels.tolist()}\r\n\r\n # prediction was successful\r\n data[\"success\"] = True\r\n \r\n # return the data dictionary as a JSON response\r\n return flask.jsonify(data)",
"def post(self):\n result = {'status': 'error'}\n\n args = input_parser.parse_args()\n input_data = args['image'].read()\n image = self.model_wrapper._read_image(input_data)\n preds = self.model_wrapper._predict(image)\n\n # Modify this code if the schema is changed\n label_preds = [{'label_id': p[0], 'label': p[1], 'probability': p[2]} for p in [x for x in preds]]\n result['predictions'] = label_preds\n result['status'] = 'ok'\n\n return result",
"def predict():\n # initialize the data dictionary that will be returned from the\n # view\n data = {\"success\": False}\n\n # ensure an image was properly uploaded to our endpoint\n if flask.request.method == \"POST\":\n if flask.request.files.get(\"image\"):\n # read the image in PIL formats\n img = flask.request.files[\"image\"].read()\n img = Image.open(io.BytesIO(img))\n\n # preprocess the image and prepare it for classification\n img = predictor.prepare_image(img, target_size=(299, 299), http_request=True)\n\n # classify the input image and then initialize the list\n # of predictions to return to the client\n predictions = predictor.model.predict(img)\n\n dog_label = predictor.decode_prediction(np.argmax(predictions, axis=-1)[0])\n print(dog_label)\n result = {\"label\" : str(dog_label), \"probability\" : float(np.max(predictions[0]))}\n data[\"predictions\"] = result\n\n # indicate that the request was a success\n data[\"success\"] = True\n\n # return the data dictionary as a JSON response\n return flask.jsonify(data)",
"def predict(image_path):\n global graph\n with graph.as_default():\n image_size = (299, 299)\n img = image.load_img(image_path, target_size=image_size)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n predictions = model.predict(x)\n print('Predicted:', decode_predictions(predictions, top=1)[0])\n return decode_predictions(predictions, top=1)[0]",
"def predict(image_path):\n img = image.load_img(image_path, target_size=image_size)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n predictions = model.predict(x)\n plt.imshow(img)\n print('Predicted:', decode_predictions(predictions, top=1)[0])\n return decode_predictions(predictions, top=1)[0]",
"def predict(self, image):\n\n if self.__preprocess != None:\n image = self.__preprocess(image)\n\n result = self.__model.predict(image)\n\n if self.__postprocess != None:\n result = self.__postprocess(result)\n\n return result",
"def predict(self, request):\r\n f = request.files['image']\r\n \r\n img = Image.open(f)\r\n \r\n image = img.convert('RGB')\r\n \r\n image_np = load_image_into_numpy_array(image)\r\n output_dict = run_inference_for_single_image(model, image_np)\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n image_np,\r\n output_dict['detection_boxes'],\r\n output_dict['detection_classes'],\r\n output_dict['detection_scores'],\r\n category_index,\r\n instance_masks=output_dict.get('detection_masks_reframed', None),\r\n use_normalized_coordinates=True,\r\n line_thickness=2, \r\n min_score_thresh=0.45, \r\n skip_scores=True)\r\n \r\n result_image = Image.fromarray(image_np)\r\n \r\n raw_bytes = BytesIO()\r\n result_image.save(raw_bytes, \"PNG\")\r\n \r\n return base64.b64encode(raw_bytes.getvalue()).decode(\"utf-8\")",
"def prediction_on_a_image(self, input, output,model_saved_path):\n\n # load the saved model\n if os.path.isfile(model_saved_path) is False:\n raise IOError('trained model: %s not exist' % model_saved_path)\n\n clf = joblib.load(model_saved_path)\n\n # split a large image to many small ones\n patch_w = 500 # parameters.get_digit_parameters(\"\", \"train_patch_width\", None, 'int')\n patch_h = 500 # parameters.get_digit_parameters(\"\", \"train_patch_height\", None, 'int')\n overlay_x = 0 # parameters.get_digit_parameters(\"\", \"train_pixel_overlay_x\", None, 'int')\n overlay_y = 0 # parameters.get_digit_parameters(\"\", \"train_pixel_overlay_y\", None, 'int')\n\n img_folder = os.path.dirname(input)\n img_name = os.path.basename(input)\n inf_list_txt = 'inf_image_list.txt'\n with open(inf_list_txt, 'w') as txt_obj:\n txt_obj.writelines(img_name + '\\n')\n\n img_patches = build_RS_data.make_dataset(img_folder, inf_list_txt, patch_w, patch_h, overlay_x, overlay_y,\n train=False)\n\n for img_idx, aImg_patches in enumerate(img_patches):\n inf_output_dir = 'inf_results' #os.path.splitext(img_name)[0]\n os.system('mkdir -p '+inf_output_dir)\n os.system('rm '+inf_output_dir+'/*')\n\n ## parallel inference patches\n # but it turns out not work due to the Pickle.PicklingError\n # not working due to mulitple parameters. Jan 9, 2019, hlc\n # use multiple thread\n num_cores = multiprocessing.cpu_count()\n print('number of thread %d' % num_cores)\n # theadPool = mp.Pool(num_cores) # multi threads, can not utilize all the CPUs? not sure hlc 2018-4-19\n theadPool = Pool(num_cores) # multi processes\n\n # inference_one_patch_svm(img_idx, image_count, p_idx, patch_count, inf_output_dir, img_patch, scaler,clf)\n\n parameters_list = [\n (img_idx, len(img_patches), idx, len(aImg_patches), inf_output_dir, img_patch, self._scaler, clf)\n for (idx, img_patch) in enumerate(aImg_patches)]\n # results = theadPool.map(inference_one_patch_svm, parameters_list) # not working\n results = theadPool.starmap(inference_one_patch_svm, parameters_list) # need python3\n print('result_list', results)\n\n # for p_idx, img_patch in enumerate(aImg_patches):\n # # read images\n # patch_data = build_RS_data.read_patch(img_patch) # read_whole_x_pixels(input)\n #\n # nbands, height, width = patch_data.shape\n #\n # X_predit = patch_data.reshape(nbands, -1)\n # X_predit = np.transpose(X_predit, (1, 0))\n #\n # if os.path.isfile(scaler_saved_path) and self._scaler is None:\n # self._scaler = joblib.load(scaler_saved_path)\n # result = self._scaler.transform(X_predit)\n # X = result.tolist()\n # elif self._scaler is not None:\n # result = self._scaler.transform(X_predit)\n # X = result.tolist()\n # else:\n # X = X_predit\n # basic.outputlogMessage('warning, no pre-processing of data before prediction')\n #\n # # more method on prediction can be foudn in :\n # # https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html\n # pre_result = clf.predict(X)\n # result_img = pre_result.reshape((height, width))\n #\n # # save results\n # print('Save patch:%d/%d on Image:%d/%d , shape:(%d,%d)' %\n # (p_idx,len(aImg_patches), img_idx,len(img_patches), result_img.shape[0], result_img.shape[1]))\n #\n # # short the file name to avoid error of \" Argument list too long\", hlc 2018-Oct-29\n # file_name = \"I%d_%d\" % (img_idx, p_idx)\n #\n # save_path = os.path.join(inf_output_dir, file_name + '.tif')\n # build_RS_data.save_patch_oneband_8bit(img_patch,result_img.astype(np.uint8),save_path)\n #\n # with 
rasterio.open(input) as src_obj:\n # # Set spatial characteristics of the output object to mirror the input\n # kwargs = src_obj.meta\n # kwargs.update(\n # dtype=rasterio.uint8,\n # count=1)\n # # Create the file\n # with rasterio.open(output, 'w', **kwargs) as dst:\n # dst.write_band(1, result_img.astype(rasterio.uint8))\n # basic.outputlogMessage(\"save to %s\" % output)\n\n return True",
"def predict():\n\n\n json_payload = request.json\n #LOG.info(f\"JSON payload: %s\" %json_payload)\n inference_payload = pd.DataFrame(json_payload)\n #LOG.info(\"inference payload DataFrame: %s\" %inference_payload)\n scaled_payload = scale(inference_payload)\n prediction = list(clf.predict(scaled_payload))\n return jsonify({'prediction': prediction})",
"def predict(input_shape, model, image_path):\n \n # Load and resize the image using PIL.\n img = PIL.Image.open(image_path)\n print('input_shape: ', input_shape)\n img_resized = img.resize(input_shape, PIL.Image.LANCZOS)\n\n # Plot the image.\n plt.imshow(img_resized)\n plt.show()\n\n # Convert the PIL image to a numpy-array with the proper shape.\n img_array = np.expand_dims(np.array(img_resized), axis=0)\n\n # Use the ResNet50 model to make a prediction.\n # This outputs an array with 1000 numbers corresponding to\n # the classes of the ImageNet-dataset.\n pred = model.predict(img_array)\n \n # Decode the output of the ResNet50 model.\n pred_decoded = decode_predictions(pred)[0]\n\n # Print the predictions.\n for code, name, score in pred_decoded:\n print(\"{0:>6.2%} : {1}\".format(score, name))\n \n return",
"def predict(self, img):\n logger.info(\"predict() for %s\" %threading.current_thread())\n\n #detect face from the image\n face, rect = self.detect_face(img)\n\n if face is None or rect is None:\n #print(\"No face found for img \", type(img))\n return None, None, None, None\n\n if self.redis_server_password is None:\n # No training data available. Just perform detection and return\n # an error message in the subject value.\n warning = \"Training data not available. Redis password not set.\"\n subject = \"No Training Password\" # This will be displayed with the face\n confidence = 0\n logger.warning(\"%s\" %warning)\n return None, subject, confidence, rect\n\n #predict the image using our face recognizer\n label, confidence = self.face_recognizer.predict(face)\n #get name of respective label returned by face recognizer\n label_text = self.face_recognizer.getLabelInfo(label)\n logger.info(\"label=%s label_text=%s\" %(label, label_text))\n\n # print(label_text, confidence, rect)\n return img, label_text, confidence, rect",
"def predict(image_data):\n PAYLOAD = {}\n PAYLOAD[\"timestamp\"] = str(datetime.now())\n PAYLOAD[\"inference-type\"] = \"image-classification\"\n PAYLOAD[\"inference-description\"] = \"Top {} predictions with score {} or above \".format(\n config_utils.MAX_NO_OF_RESULTS, config_utils.SCORE_THRESHOLD\n )\n PAYLOAD[\"inference-results\"] = []\n\n try:\n # Run DLR to perform inference with DLC optimized model\n model_output = dlr_model.run(image_data)\n config_utils.logger.info(\"pred shape: '{}'.\".format(model_output[0][0].shape)) \n probabilities = softmax(model_output[0][0])\n config_utils.logger.info(\"pred shape softmax: '{}'.\".format(probabilities.shape)) \n sort_classes_by_probability = argsort(probabilities)[::-1]\n\n config_utils.logger.info(\"pred classes: '{}'.\".format(sort_classes_by_probability[: config_utils.MAX_NO_OF_RESULTS])) \n\n for i in sort_classes_by_probability[: config_utils.MAX_NO_OF_RESULTS]:\n if probabilities[i] >= config_utils.SCORE_THRESHOLD:\n result = {\"Label\": str(synset[i]), \"Score\": str(probabilities[i])}\n PAYLOAD[\"inference-results\"].append(result)\n\n config_utils.logger.info(dumps(PAYLOAD))\n\n if config_utils.TOPIC.strip() != \"\":\n ipc_utils.IPCUtils().publish_results_to_cloud(PAYLOAD)\n else:\n config_utils.logger.info(\"No topic set to publish the inference results to the cloud.\")\n\n except Exception as e:\n config_utils.logger.error(\"Exception occured during prediction: {}\".format(e))",
"def run(self, input_path, output_path):\n # read in data\n try:\n image = Image.open(input_path)\n except Exception:\n raise ValueError(\"invalid image file\")\n \n # data preprocessing\n img = self.preprocess(image)\n \n # perform inference\n output = self.model(img)\n \n # post process\n results = self.postprocess(output)\n \n # save output\n results = {'results': results}\n\n with open(output_path, 'w') as out:\n json.dump(results, out)",
"def predict(self, image_to_predict):\n\n y_pred = self.classifier.predict(image_to_predict)\n\n return y_pred",
"def predict(cls, image_path: str) -> tuple:\n\n print(\"Classify input image: \")\n return cls.model.predict(image_path)",
"def predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return predict_1(trained_model, X_test, y_test)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return cv_predict_3(trained_model, X_test, y_test)\n else:\n return predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return predict_4(trained_model, X_test, y_test)",
"async def predict(iris: IrisPredictionInput) :\n return clf.predict(iris.data)",
"def run(request):\n try:\n # ensure the user has used a POST request\n if request.method == \"POST\":\n # prepare data for prediction\n # note: we expect an image of size 28x28 here.\n # TODO: add handling of images that are not 28x28, either resize or deny\n try:\n data = np.asarray(Image.open(io.BytesIO(request.get_data(False)))).reshape(-1, 28, 28)\n except UnidentifiedImageError:\n raise ValueError(\n \"The provided image data could not be read. Ensure that you provide a valid image, eg. in jpeg or \"\n \"png format.\"\n )\n\n # do prediction\n prediction_confidences = neural_network.predict(data)\n predicted_label_index = np.argmax(prediction_confidences)\n predicted_label = labels[predicted_label_index]\n confidence = prediction_confidences[0][predicted_label_index]\n\n # return result\n return AMLResponse(\n {\"predicted_label\": predicted_label, \"confidence\": str(confidence)}, status_code=200, json_str=True,\n )\n else:\n raise Exception(\"This service supports POST requests only.\")\n\n except Exception as exception:\n return AMLResponse(\n {\"error\": repr(exception), \"traceback\": traceback.format_exc()}, status_code=500, json_str=True,\n )",
"def predict() -> Any:\n threshold = request.form.get(\"threshold\", type=float)\n source_size = request.form.get(\"source_size\", type=bool)\n images = request.files.getlist(\"images\")\n result = {}\n for image in images:\n input_image = prepare_input(image)\n if input_image is not None:\n output_image = model.predict(input_image, threshold, source_size)\n if output_image is not None:\n result[image.filename] = prepare_output(output_image)\n else:\n result[image.filename] = None\n else:\n result[image.filename] = None\n return result",
"def inference():\n if request.method == \"POST\":\n data = request.json #\n src_img = np.array(data[\"src\"]).astype(np.uint8) # Parsing data\n ref_img = np.array(data[\"ref\"]).astype(np.uint8) #\n ref_label = int(data[\"ref_label\"]) #\n result = get_inference(src_img, ref_img, ref_label) # Calling helper function\n return jsonify({\"result\": result.tolist()}) # Returning results into json",
"def process_prediction():\n try:\n input = np.array(request.json, dtype=np.float32)\n prediction = predictor.predict(input).numpy().tolist()\n return jsonify(result=prediction, status=\"Prediction succeeded\")\n except Exception as err:\n return jsonify(result=None, status=f\"Prediction failed: {err}\")",
"def predict_one_image(img_path, prediction_model):\n # Load image and resize it\n img = image.load_img(img_path, target_size=(224, 224))\n # Transform it in array\n x = image.img_to_array(img)\n # Expand array dimension\n x = np.expand_dims(x, axis=0)\n # Make prediction\n prediction_score = prediction_model.predict(x)\n return prediction_score",
"def inference():\n data = request.get_json(force = True)\n\n with torch.no_grad():\n torch.cuda.empty_cache()\n image = ToTensor(Image.open(BytesIO(b64decode(data['image'])))).half().cuda().unsqueeze_(0)\n inputs = test_transform(image)\n model_id = model_usage.get(True)\n outputs = model[model_id](inputs)[0]\n model_usage.put(model_id, False)\n prediction = classes[outputs.argmax(0)]\n del inputs, outputs, image\n \n image_storage.put((data['esun_uuid'], data['image'], prediction), False)\n\n t = datetime.datetime.now()\n ts = str(int(t.utcnow().timestamp()))\n s = sha256()\n s.update((CAPTAIN_EMAIL + ts + SALT).encode(\"utf-8\"))\n server_uuid = s.hexdigest()\n\n return jsonify({'esun_uuid': data['esun_uuid'],\n 'server_uuid': server_uuid,\n 'answer': prediction,\n 'server_timestamp': time()})",
"def predict(self):\n self.canv.update()\n ps = self.canv.postscript(colormode='mono')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save('result.png')\n x = Predict.transform_image(self)\n \n #prediction with multivariate regression\n Y_hat_test = self.multivariate_model.predict([x])\n C_multivariate = map(np.argmax, Y_hat_test) # classification vector\n C_multivariate = list(C_multivariate)\n multivariate_predict = C_multivariate[0]\n\n \n #prediction with Linear Discriminant Analysis (LDA)\n lda_predict = self.lda_model.predict([x])[0]\n qda_predict = self.qda_model.predict([x])[0]\n log_predict = self.log_model.predict([x])[0]\n \n baseline_label = Label(self, text='Baseline: ' + str(multivariate_predict) )\n baseline_label.grid(row=0, column=1, padx=5, pady=5)\n lda_label = Label(self, text=' LDA: '+ str(lda_predict))\n lda_label.grid(row=0, column=2, padx=5, pady=5)\n qda_label = Label(self, text='QDA: '+ str(qda_predict))\n qda_label.grid(row=1, column=1, padx=5, pady=5)\n log_label = Label(self, text=' Logistic: '+str(log_predict))\n log_label.grid(row=1, column=2, padx=5, pady=5)",
"def predict() -> flask.Response:\n try:\n # read mapillary keys\n mapillary_keys = request.args[\"mapillary_keys\"]\n image_ids = json.loads(mapillary_keys)\n\n # put single image into a list\n if isinstance(image_ids, str):\n image_ids = [image_ids]\n\n # create temporary directory\n tmp_dir = Path(__file__).parents[0] / \"tmp_dir\"\n Path(tmp_dir).mkdir(exist_ok=True)\n\n # iterate over all keys and get a prediction\n predictions = {}\n for image_id in image_ids:\n print(\"Getting prediction for image_id={image_id}\".format(image_id=image_id))\n image_save_path = download_mapillary_image(image_id, tmp_dir)\n img_tensor = transform_image(image_save_path)\n smoothness, surface = predict_image(img_tensor)\n predictions[image_id] = {\"smoothness\": smoothness, \"surface\": surface}\n\n # delete temporary directory\n shutil.rmtree(tmp_dir)\n return jsonify(predictions)\n except:\n # in case of any error, return the traceback\n return jsonify({\"trace\": traceback.format_exc()})",
"def predict(model, img, imgSize):\n \n #Reajusta o tamanho da imagem para o tamanho esperado caso necessario.\n if img.size != imgSize :\n img = img.resize(imgSize)\n\n #Converte a imagem num array tridimensional.\n x = image.img_to_array(img)\n x = numpy.expand_dims(x, axis=0)\n #Normaliza a imagem.\n x = preprocess_input(x)\n \n #Faz a previsao atraves da rede.\n pred = model.predict(x)\n return imagenet_utils.decode_predictions(pred, top=5)[0]",
"def predict(model, img):\n\tx = image.img_to_array(img)\n\tx = np.expand_dims(x, axis=0)\n\tx = preprocess_input(x)\n\tpreds = model.predict(x)\n\treturn preds[0]",
"def predict(self, data, version='default'):\n return self.skil.api.transformimage(\n deployment_name=self.deployment.name,\n image_transform_name=self.model_name,\n version_name=version,\n files=data\n )",
"def predict():\n \n ## input checking\n if not request.json:\n print(\"ERROR: API (predict): did not receive request data\")\n return jsonify([])\n\n if 'query' not in request.json:\n print(\"ERROR: API (predict): received request, but no 'query' found within\")\n return jsonify([])\n\n ## set the test flag\n test = False\n if 'mode' in request.json and request.json['mode'] == 'test':\n test = True\n\n ## extract the query\n query = request.json['query']\n \n ## load model\n if test:\n data, models = model_load(prefix='test')\n else:\n data, models = model_load()\n \n if not models:\n print(\"ERROR: API (predict): models not available\")\n return jsonify([])\n\n _result = model_predict(**query,all_models=models,test=test)\n result = {}\n \n ## convert numpy objects to ensure they are serializable\n for key,item in _result.items():\n if isinstance(item,np.ndarray):\n result[key] = item.tolist()\n else:\n result[key] = item\n \n return(jsonify(result))",
"def predict():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n return 'No file found'\n user_file = request.files['file']\n if user_file.filename == '':\n return 'file name not found …'\n else:\n path=os.path.join(os.getcwd()+user_file.filename)\n user_file.save(path)\n K.clear_session() \n classes = pred(path)\n K.clear_session() \n \n return jsonify({\n \"status\":\"success\",\n \"prediction\":classes[0],\n \"confidence\":str(classes[1])\n })"
] | [
"0.7527697",
"0.7489377",
"0.733074",
"0.7176334",
"0.70035565",
"0.6992929",
"0.6954645",
"0.68714213",
"0.68302935",
"0.67761964",
"0.67373866",
"0.67367834",
"0.6716685",
"0.66948694",
"0.6670388",
"0.66675407",
"0.6614825",
"0.66144085",
"0.65787727",
"0.6575756",
"0.6552536",
"0.6540134",
"0.65263224",
"0.6504371",
"0.64990366",
"0.6495372",
"0.64809483",
"0.6479603",
"0.64780676",
"0.6468782"
] | 0.78791124 | 0 |
Generates four day weekend report. The four day weekends are calculated from the start_month through the end of the year along with the number of work days for the same time period. The report takes into account any holidays that might fall within that time period and days designated as working from home (WFH). If show_workdays is set to True, a report with the work days is generated instead of the four day weekend dates. | def four_day_weekends(
*args,
start_month: int = 8,
paid_time_off: int = 200,
year: int = 2020,
show_workdays: bool = False
) -> None:
if len(args) > 0:
raise ValueError(ERROR_MSG)
# Collect US holidays for the year that fall on a Friday or Monday; weekends adjacent to these are excluded later
holidays = list()
# for hol in holidays.UnitedStates(years=year).items():
for i, hol in enumerate(FEDERAL_HOLIDAYS):
# print(hol, calendar.day_name[hol.weekday()])
if calendar.day_name[hol.weekday()] in ('Friday', 'Monday'):
holidays.append(hol)
# print(holidays)
# get number of weekends left in year from start_month
weekends = list()
# day_range = calendar.monthrange(year, start_month)
start_date = date(year, start_month, 1)
end_date = date(year, 12, 31)
for i in range((end_date - start_date).days + 1):
if calendar.day_name[(start_date + timedelta(days=i)).weekday()] in ('Friday', 'Monday'):
weekends.append(start_date + timedelta(days=i))
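# Trim the list so it starts on a Friday and ends on a Monday, leaving clean Friday/Monday pairs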
if calendar.day_name[weekends[0].weekday()] == 'Monday':
weekends = weekends[1:]
if calendar.day_name[weekends[-1].weekday()] == 'Friday':
weekends = weekends[:-1]
# print(weekends)
take_out_days = list()
for i, w in enumerate(weekends):
if w in holidays:
if (w - weekends[i - 1]).days == 3:
take_out_days.append(weekends[i - 1])
take_out_days.append(w)
if (weekends[i + 1] - w).days == 3:
take_out_days.append(w)
take_out_days.append(weekends[i + 1])
# print(w, weekends[i - 1], (w - weekends[i - 1]).days, weekends[i + 1], (weekends[i + 1]-w).days)
# print(take_out_days)
# print(AT_HOME)
four_day_weekends = [w for w in weekends if w not in take_out_days]
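# 'ast' marks the cut-off weekend for the available PTO; if set, that weekend is starred ('*') in the report below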
if (paid_time_off // 8) + 1 < len(four_day_weekends):
ast = four_day_weekends[:(-paid_time_off // 8) + 1][-1]
else:
ast = None
if not show_workdays:
# print(ast)
print(" {} Four-Day Weekends".format(len(four_day_weekends) // 2))
print("========================")
print(" PTO: {} ({} days)".format(paid_time_off, paid_time_off // 8))
print(" BALANCE: {} ({} days)".format(paid_time_off - (len(four_day_weekends) * 8),
abs((paid_time_off - (len(four_day_weekends) * 8)) // 8)))
print()
it = iter(four_day_weekends)
for x in it:
next_date = next(it)
if ast and ast in (x, next_date):
print('{} - {} *'.format(x, next_date))
else:
print('{} - {}'.format(x, next_date))
else:
work_days = list()
for i in range((end_date - start_date).days + 1):
if (start_date + timedelta(days=i)).weekday() not in AT_HOME and start_date + timedelta(
days=i) not in four_day_weekends and start_date + timedelta(
days=i) not in holidays and start_date + timedelta(
days=i) not in FEDERAL_HOLIDAYS:
work_days.append(start_date + timedelta(days=i))
# print(work_days)
work_days_remaining = len(work_days)
print("Remaining Work Days: {} ({} days)".format(work_days_remaining * 8, work_days_remaining))
it = iter(work_days)
for x in it:
print(x) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def four_day_weekends(*args,\n start_month: int = 8,\n paid_time_off: int = 200,\n year: int = 2020,\n show_workdays: bool = False\n ) -> None:\n\n\n if args:\n raise ValueError(ERROR_MSG)\n else:\n four_day_weekends = workdays = 0\n weekend_dates =[]\n workday_dates = []\n current = date(year,start_month,1)\n current += relativedelta(weekday=FR)\n first_monday = current + relativedelta(weekday=MO(-1))\n if first_monday.year == year and first_monday.month == start_month:\n workday_dates.append(first_monday)\n \n\n\n\n while current.year == year:\n monday = current + relativedelta(weekday=MO)\n thursday = current - relativedelta(days=1)\n if thursday.year == year and thursday not in FEDERAL_HOLIDAYS:\n workday_dates.append(thursday)\n\n\n dates = [current,monday]\n if monday.year == year:\n if all(date not in FEDERAL_HOLIDAYS for date in dates):\n weekend_dates.append((current,monday))\n four_day_weekends += 1\n else:\n if monday not in FEDERAL_HOLIDAYS:\n workday_dates.append(monday)\n if current not in FEDERAL_HOLIDAYS:\n workday_dates.append(current)\n else:\n if current not in FEDERAL_HOLIDAYS:\n workday_dates.append(current)\n\n\n\n\n\n\n current += relativedelta(weeks=1)\n \n\n\n last_thursday = current - relativedelta(days=1)\n if last_thursday.year == year and last_thursday not in FEDERAL_HOLIDAYS:\n workday_dates.append(last_thursday)\n workdays = len(workday_dates)\n\n\n \n if not show_workdays:\n length = len(str(paid_time_off))\n number = 24 \n before_days = paid_time_off//8 \n new_balance = paid_time_off - HOURS * four_day_weekends * 2\n new_days = abs(new_balance // 8)\n title = f'{four_day_weekends} Four-Day Weekend{\"s\" if four_day_weekends != 1 else \"\"}'\n print(f'{title:^{number}}')\n print('='* 24)\n\n labels = ['PTO:','BALANCE:']\n original = [paid_time_off,new_balance]\n new = [before_days,new_days]\n\n \n for label,value_1,value_2 in zip(labels,original,new):\n print(f'{label:>8} {value_1:>{length}} ({value_2} days)')\n\n print()\n \n\n\n start_losing = (four_day_weekends * 2 - before_days)//2\n date_start_losing = None\n if start_losing > 0:\n date_start_losing = weekend_dates[start_losing]\n for i,(weekend_start,weekend_end) in enumerate(weekend_dates):\n print(f\"{weekend_start} - {weekend_end}\",end='')\n if (weekend_start,weekend_end) == date_start_losing:\n print(' *')\n else:\n print()\n\n\n else:\n print(f'Remaining Work Days: {workdays * 8} ({workdays} days)')\n\n\n print('\\n'.join(map(str,workday_dates)))",
"def printSummary(self):\n\t\tweekWorkHours = None\n\t\tdayDelta = None\n\t\tfor num in self.workdays:\n\t\t\tday = self.workdays[num]\n\t\t\tif day.daytype == DayType.weekend:\n\t\t\t\tif weekWorkHours:\n\t\t\t\t\thours = weekWorkHours.total_seconds() // 3600\n\t\t\t\t\tmins = weekWorkHours.seconds // 60 % 60\n\t\t\t\t\tprinty('------{}hrs-----'.format(hours), 'y')\n\t\t\t\t\tweekWorkHours = None\n\t\t\t\t\tdayDelta = None\n\t\t\t\tprinty('{:02d}. (WE)'.format(num), 'w')\n\t\t\telif day.daytype == DayType.holiday:\n\t\t\t\tprinty('{:02d}. (Urlaub)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.illness:\n\t\t\t\tprinty('{:02d}. (Krank)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.overtime_free:\n\t\t\t\tprinty('{:02d}. (Überstundenausgleich)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.business_trip:\n\t\t\t\tprinty('{:02d}. (Dienstreise)'.format(num), 'c')\n\t\t\t\tdayDelta = timedelta(hours=8)\n\t\t\telif day.daytype == DayType.work:\n\t\t\t\tdayDelta = day.getWorkingTime()\n\t\t\t\tworkhours = dayDelta.seconds // 3600\n\t\t\t\tworkrestminutes = dayDelta.seconds // 60 % 60\n\t\t\t\tabsday = datetime.strptime('{}.{}.{}'.format(num, self.monthNum, self.year),'%d.%m.%Y')\n\t\t\t\ttoday = datetime.today()\n\t\t\t\tpauseDelta = day.getPauseTime()\n\t\t\t\tpausehours = pauseDelta.seconds // 3600\n\t\t\t\tpauserestminutes = pauseDelta.seconds // 60 % 60\n\t\t\t\tif absday == today:\n\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'wH')\n\t\t\t\telif absday > today:\n\t\t\t\t\t# future days\n\t\t\t\t\tif len(day.timeblocks) == 0:\n\t\t\t\t\t\tprinty('{:02d}. ?'.format(num), 'g')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'g')\n\t\t\t\telse:\n\t\t\t\t\t# past days\n\t\t\t\t\tif dayDelta > timedelta(hours=8):\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'n>')\n\t\t\t\t\telif dayDelta < timedelta(hours=8):\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'r>')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprinty('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'n')\n\t\t\tif weekWorkHours == None:\n\t\t\t\tweekWorkHours = dayDelta\n\t\t\telse:\n\t\t\t\tif dayDelta:\n\t\t\t\t\tweekWorkHours = weekWorkHours + dayDelta",
"def test_no_weekend_dates(self):\n input_ = [\n self.indicator_record(date=datetime.date(2014, 10, 14), value=0.035657),\n ]\n output = self.expander._daily_workday_indicator_expander(input_)\n no_weekend_dates = [record.date.weekday() < 5 for record in output]\n\n self.assertTrue(all(no_weekend_dates))",
"def _create_week_dates_text(self):\n week_start = []\n week_end = []\n week_text = []\n week_start.append(self.start_date)\n week_end.append(self.start_date + timedelta(days=6))\n week_start.append(week_end[0] + timedelta(days=1))\n week_end.append(self.display_end_date)\n for i in (0,1):\n week_start_month = week_start[i].strftime(\"%b\")\n week_start_day = week_start[i].strftime(\"%d\").lstrip(\"0\")\n week_end_month = week_end[i].strftime(\"%b\")\n week_end_day = week_end[i].strftime(\"%d\").lstrip(\"0\")\n week_text.append(\"%s %s - %s %s\" %(week_start_month, \n week_start_day, week_end_month, week_end_day))\n return week_text",
"def compute_workdays(start, end):\n # Subtracts the total number of holidays from the total number of weekdays\n return compute_weekdays(start, end) - compute_holidays(start, end)[\"total holidays:\"]",
"def generate_xlsx_report(self, workbook, data, parts_data):\n worksheet = workbook.add_worksheet(\"daily_parts_issuance_wizard\")\n worksheet.set_column(0, 0, 10)\n worksheet.set_column(1, 1, 15)\n worksheet.set_column(2, 2, 20)\n worksheet.set_column(3, 3, 15)\n worksheet.set_column(4, 4, 10)\n worksheet.set_column(5, 5, 12)\n worksheet.set_column(6, 6, 10)\n worksheet.set_column(7, 7, 10)\n worksheet.set_column(8, 8, 15)\n worksheet.set_column(9, 9, 10)\n worksheet.set_column(10, 10, 15)\n worksheet.set_column(11, 11, 10)\n worksheet.set_column(12, 12, 20)\n worksheet.set_column(13, 13, 5)\n worksheet.set_column(14, 14, 5)\n worksheet.set_column(15, 15, 5)\n\n bold = workbook.add_format(\n {\"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n tot = workbook.add_format(\n {\"border\": 2, \"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n border = workbook.add_format(\n {\"border\": 2, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n merge_format = workbook.add_format({\"border\": 2, \"align\": \"center\"})\n format1 = workbook.add_format(\n {\"border\": 2, \"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n format1.set_bg_color(\"gray\")\n date = workbook.add_format({\"num_format\": \"dd/mm/yy\"})\n\n worksheet.merge_range(\"C3:F3\", \"Merged Cells\", merge_format)\n\n row = 0\n row += 1\n row += 1\n worksheet.write(row, 2, \"DAILY PARTS ISSUANCE\", tot)\n row += 1\n worksheet.write(row, 2, \"Date From:\", tot)\n worksheet.write(row, 3, data[\"form\"][\"date_from\"] or \"\", border)\n worksheet.write(row, 4, \"To:\", tot)\n worksheet.write(row, 5, data[\"form\"][\"date_to\"] or \"\", border)\n row += 2\n worksheet.write(row, 0, \"CMF\", bold)\n row = 3\n\n for objec in self.get_work_order_detail(data[\"form\"]):\n row += 3\n worksheet.write(row, 0, \"DATE ISSUED :\", bold)\n worksheet.write(row, 1, objec.get(\"date\") or \"\", date)\n row += 2\n worksheet.write(row, 0, \"NO.\", format1)\n worksheet.write(row, 1, \"WO NO.\", format1)\n worksheet.write(row, 2, \"VEHICLE ID\", format1)\n worksheet.write(row, 3, \"PART NO.\", format1)\n worksheet.write(row, 4, \"PART NAME\", format1)\n worksheet.write(row, 5, \"VEHICLE MAKE\", format1)\n worksheet.write(row, 6, \"USED\", format1)\n worksheet.write(row, 7, \"UNIT TYPE\", format1)\n worksheet.write(row, 8, \"OLD PART RETURND\", format1)\n worksheet.write(row, 9, \"ISSUED BY\", format1)\n worksheet.write(row, 10, \"REMARKS\", format1)\n line_row = row + 1\n line_col = 0\n counter = 1\n for obj in objec.get(\"value\"):\n worksheet.write(line_row, line_col, counter, border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"wo_name\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"vehicle_id\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"part_no\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"part_name\") or \"\", border)\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.get(\"vehicle_make\") or \"\", border\n )\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"qty\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"uom\") or \"\", border)\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.get(\"old_part_return\") or \"\", border\n )\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"issued_by\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"remarks\") 
or \"\", border)\n line_col = 0\n line_row += 1\n counter += 1\n worksheet.write(line_row, line_col, \"********\", border)",
"def netto_workdays(self, start_date, end_date, holidays=[], weekend_days=[5,6]):\n delta_days = (end_date - start_date).days + 1\n full_weeks, extra_days = divmod(delta_days, 7)\n # num_workdays = how many days/week you work * total number of weeks\n num_workdays = (full_weeks + 1) * (7 - len(weekend_days))\n # subtract out any working days that fall in the 'shortened week'\n for d in range(1, 8 - extra_days):\n if (end_date + timedelta(d)).weekday() not in weekend_days:\n num_workdays -= 1\n # skip holidays that fall on weekend_days\n holidays = [x for x in holidays if x.weekday() not in weekend_days]\n # subtract out any holidays\n for d in holidays:\n if start_date <= d <= end_date:\n num_workdays -= 1\n return num_workdays",
"def week_report_handle(fans_type):\n\t#import pdb;pdb.set_trace()\n\tlast_day = datetime.date.today()-timedelta(days=datetime.datetime.today().weekday() + 1)\n\ttoday = datetime.date.today()\n\n\tfans_pages = FansPage.objects.filter(fans_type=fans_type, date__gte=last_day, date__lte=today).order_by(\"date\")\n\n\tstart = fans_pages[0]\n\tlast = fans_pages[len(fans_pages) - 1]\n\n\t#talk_about_is = (last.talk_about_is - start.talk_about_is) / (start.talk_about_is + 0.0) * 100\n\ttalk_about_is = (last.talk_about_is - start.talk_about_is)\n\t#total_like_count = (last.total_like_count - start.total_like_count) / (start.total_like_count + 0.0) * 100\n\ttotal_like_count = (last.total_like_count - start.total_like_count)\n\t#total_fans = (last.total_fans - start.total_fans) / (start.total_fans + 0.0) * 100\n\ttotal_fans = (last.total_fans - start.total_fans)\n\treturn {\"talk_about_is\":talk_about_is, \"total_like_count\":total_like_count, \"total_fans\":total_fans, \"start\":start.date, \"last\":last.date}",
"def compute_weekdays(start, end):\n # Subtracts the total number of weekend days from the total number of days\n return compute_total_days(start, end) - compute_weekends(start, end)",
"def gen_weeklyFrequency(self):\n\n if len(self.fields) == 0:\n return None\n\n if self.validator.validate(self.fields) == False:\n return None\n\n weeklyFrequency = 0\n dayFields = ['day1','day2','day3','day4','day5','day6','day7']\n for dayField in dayFields:\n if dayField in self.fields:\n if self.fields[dayField] == True:\n weeklyFrequency += 1\n\n return weeklyFrequency",
"def generate_waiter_financial_report_excel_file(self, staff_info, period, month_report, path):\n try:\n workbook = xlw.Workbook(path)\n worksheet = workbook.add_worksheet()\n\n file_header_format = workbook.add_format({\n 'font_size':20,\n 'align': 'center',\n 'valign': 'vcenter'\n })\n table_header_format = workbook.add_format({\n 'bold': 1,\n 'border': 1,\n 'align': 'center',\n 'valign': 'vcenter',\n 'font_size': 12,\n 'fg_color': '#C0C0C0'})\n cell_format = workbook.add_format({\n 'font_size': 12,\n 'align':'center',\n 'valign':'vcenter'\n })\n sum_format = workbook.add_format({\n 'font_size': 12,\n 'align': 'center',\n 'valign': 'vcenter',\n 'fg_color': '#99FF99'\n })\n\n worksheet.set_column('A:A', 10)\n worksheet.set_column('B:B', 30)\n worksheet.set_column('C:C', 20)\n worksheet.set_column('D:D', 20)\n worksheet.set_column('E:E', 20)\n worksheet.set_column('F:F', 10)\n worksheet.set_column('G:G', 15)\n\n worksheet.merge_range('A1:G2', f'{staff_info[3]} {staff_info[1]} {period}', file_header_format)\n\n row = 4\n column = 0\n\n for line in month_report:\n for item in line:\n if row == 4:\n worksheet.write(row, column, item.__str__(), table_header_format)\n else:\n if month_report.index(line) == len(month_report)-1 and line.index(item) == len(line)-1:\n worksheet.write(row, column, item.__str__(), sum_format)\n else:\n worksheet.write(row, column, item.__str__(), cell_format)\n column += 1\n row += 1\n column = 0\n\n workbook.close()\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def formatDay(self, themonth, date, num_weeks):\n if date.month == themonth:\n day_class = 'day'\n else:\n day_class = 'noday' # day outside month\n\n html = '<td class=\"%s' % day_class\n\n # if this is today then highlight it\n if date == self.today:\n html += ' today'\n today_text = 'Today '\n else:\n today_text = ''\n\n # if this is the selected date then tag it\n if date == self.selected_date or (self.selected_record\n and date == self.selected_record.start_date):\n html += ' selected'\n # if a filter range is set then tag it\n elif (self.filter_start_date and self.filter_finish_date\n and self.filter_start_date <= date\n and date <= self.filter_finish_date):\n html += ' filtered'\n\n html += ('\" style=\"height: %f%%\"><div class=\"%s_header\">'\n '<a class=\"block\" '\n 'href=\"?year=%d&month=%d&day=%d&clear_recording_id=1\">'\n '%s%d</a></div>' % (90.0 / num_weeks, day_class,\n date.year, date.month, date.day, today_text, date.day))\n\n if self._storage:\n for recording in self._storage.getRecordings(date,\n station=self.filter_station):\n extra_div_class = \"\"\n if (self.selected_record\n and recording.id == self.selected_record.id):\n extra_div_class += \" selected_entry\"\n if ((self.filter_title and self.filter_title\n != recording.title)\n or (self.filter_start_date and self.filter_start_date\n > recording.finish_time.date())\n or (self.filter_finish_date and self.filter_finish_date\n < recording.start_time.date())):\n extra_div_class += \" filtered_out\"\n html += ('<div class=\"day_entry%s\"><a class=\"block\" '\n 'href=\"?year=%d&month=%d&recording_id=%d'\n '&set_recording_id=1\">\\n'\n '<span class=\"recording_time\">%s</span>\\n'\n '<span class=\"recording_station\">%s</span>\\n'\n '<span class=\"recording_title\">%s</span>\\n'\n '</a></div>\\n' % (extra_div_class, date.year,\n date.month, recording.id,\n formatTimeUI(recording.start_time, compact=True),\n formatStationName(recording.station, compact=True),\n recording.title))\n\n return html + '</td>'",
"def do_rrw(self, arg):\n self.do_timesheet('report extend track week')",
"def week_delimited(timeframe_from: int, timeframe_to: int,\n lines: List[List[List[str]]], times: List[List[datetime]],\n colored: bool) -> None:\n daily_times = [0 for _ in range(timeframe_from - timeframe_to)]\n dates = [times[0][1]]\n beg_date = times[0][0].date()\n day = 0\n for start, stop in times:\n if (stop.date() - beg_date).days != day:\n dates.append(stop)\n day += 1\n daily_times[day] += (stop - start).seconds\n\n num_weeks = ceil((timeframe_from - timeframe_to) / 7)\n week_daily_times = [[] for _ in range(num_weeks)]\n week_dates = [[] for _ in range(num_weeks)]\n for idx, (date, daily_time) in enumerate(zip(dates, daily_times)):\n week_daily_times[idx // 7].append(daily_time)\n week_dates[idx // 7].append(date)\n\n total_seconds = sum(daily_times)\n average_seconds = total_seconds // num_weeks\n\n if colored:\n print(f'Chosen display: {colors.FG.BRIGHT.RED}'\n f'WEEK DELIMITED{colors.RESET}\\n')\n else:\n print('Chosen display: WEEK DELIMITED\\n')\n\n for dates, daily_times in zip(week_dates, week_daily_times):\n for date, daily_time in zip(dates, daily_times):\n date = date.strftime(DATE_FORMAT_PATTERN)\n display_summary(date, daily_time, colored)\n\n total = sum(daily_times)\n cols = os.get_terminal_size().columns\n if colored:\n print(f'\\n{colors.FG.BRIGHT.MAG}{\"=\" * (cols - 6)}'\n f'{colors.RESET}\\n')\n display_summary('Weekly amount', total, True)\n print(f'\\n{colors.FG.BRIGHT.MAG}{\"=\" * (cols - 6)}'\n f'{colors.RESET}\\n\\n')\n else:\n print(f'\\n{\"=\" * (cols - 6)}\\n')\n display_summary('Weekly amount', total, False)\n print(f'\\n{\"=\" * (cols - 6)}\\n\\n')\n\n display_summary('Average', average_seconds, colored, ' per week')\n display_summary('Total', total_seconds, colored)",
"def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True",
"def record_week_totals(self, user, start, end, num=10):\n for idx, total in \\\n self.weekly_play_counts(user, start, end, num, order_by_plays=True):\n yield idx, ldates.date_of_index(idx), total",
"def show_weeks_tasks(self):\n for day in [datetime.today() + timedelta(days=i) for i in range(7)]:\n tasks = self.session.query(self.Table).filter(self.Table.deadline == day.strftime('%Y-%m-%d')).\\\n order_by(self.Table.deadline).all()\n print(f'{day.strftime(\"%A\")} {day.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()",
"def run_week_observations(period_begin, period_end):\n observs = []\n # get all dates in the period range and find all unique weeknumbers\n all_dates = list(pd.date_range(period_begin, period_end))\n weeknumbers = list(set([x.isocalendar()[:2] for x in all_dates]))\n\n # get all the begin and end dates of the observable week (so the date of the monday and friday)\n # https://stackoverflow.com/questions/17087314/get-date-from-week-number\n all_periods = []\n for numb in weeknumbers:\n mon_date = datetime.strptime(f\"{numb[0]}-W{numb[1]}\" + '-1', '%G-W%V-%u')\n fri_date = mon_date + timedelta(4)\n all_periods.append((mon_date, fri_date))\n\n # run a new observation if the week hasn't been observerd\n if len(all_periods) > 0:\n for period in all_periods:\n # retrieve all data over the stocks in this period\n data = Stocks.objects.filter(date__range=period)\n if len(data) > 0:\n # convert the data to a dataframe\n q = data.values('component', 'indexx', 'date', 's_close')\n df_data = pd.DataFrame.from_records(q)\n\n # prepare the data for the analysis\n df_data.rename(columns={\"s_close\": \"close\"}, inplace=True)\n df_data['close'] = df_data['close'].astype('float')\n\n # load in the sector data and add it to the dataframe\n with open(r\"./articles_app/data/sectorcompany.json\") as f:\n sector_info = json.load(f)\n df_data[\"sector\"] = df_data[\"component\"].apply(lambda x: sector_info.get(x))\n df_data.dropna(inplace=True)\n\n # run the analyser to find observations\n analyse = Analyse(df_data, *period)\n analyse.find_weekly_observations()\n observs.extend(analyse.observations)\n return observs",
"def day_of_week(self):\n day_of_week_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\n diff = self.diff(Date(1, 1, 1970)) + 3\n while diff < 0:\n diff += 7\n print(day_of_week_names[diff % 7])",
"def record_weeks(self, user, start, end, num=10):\n query = self.user_weeks_between(user, start, end).order_by('-plays')[:num]\n for week in query:\n date = ldates.date_of_index(week.week_idx)\n yield week, date",
"def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))",
"def hr_report():\n\n # Load the peak data.\n db = Persistence()\n if not (activities := db.load_all()):\n print(\"No data to report on\")\n return\n\n # Find the maximum for each value.\n max = _load_max_values(activities)\n\n # Totals for the current week\n week_distance_total = 0\n week_elevation_total = 0\n week_duration_total = timedelta()\n week_work_days = 0\n week_5sec_average = []\n week_30sec_average = []\n week_60sec_average = []\n week_5min_average = []\n week_10min_average = []\n week_20min_average = []\n week_30min_average = []\n week_60min_average = []\n week_90min_average = []\n week_120min_average = []\n\n # Print the peak data for each week.\n current_weekday = None\n for activity in activities:\n\n # Time to break to a new week?\n if current_weekday is None or current_weekday > activity.start_time.weekday():\n if current_weekday:\n _print_footer(\n week_distance_total=week_distance_total,\n week_elevation_total=week_elevation_total,\n week_duration_total=week_duration_total,\n week_work_days=week_work_days,\n week_5sec_average=week_5sec_average,\n week_30sec_average=week_30sec_average,\n week_60sec_average=week_60sec_average,\n week_5min_average=week_5min_average,\n week_10min_average=week_10min_average,\n week_20min_average=week_20min_average,\n week_30min_average=week_30min_average,\n week_60min_average=week_60min_average,\n week_90min_average=week_90min_average,\n week_120min_average=week_120min_average,\n )\n week_distance_total = 0\n week_elevation_total = 0\n week_duration_total = timedelta(0)\n week_work_days = 0\n week_5sec_average = []\n week_30sec_average = []\n week_60sec_average = []\n week_5min_average = []\n week_10min_average = []\n week_20min_average = []\n week_30min_average = []\n week_60min_average = []\n week_90min_average = []\n week_120min_average = []\n\n _print_header()\n\n # Capture the weekday.\n if current_weekday is None or current_weekday != activity.start_time.weekday():\n week_work_days = week_work_days + 1\n\n current_weekday = activity.start_time.weekday()\n\n # Print the detail.\n _print_detail(activity, max)\n\n # Find the duration.\n duration = activity.end_time - activity.start_time\n\n # Accumulate for this week\n week_distance_total = week_distance_total + activity.distance\n if activity.elevation:\n week_elevation_total = week_elevation_total + activity.elevation\n week_duration_total = week_duration_total + duration\n week_5sec_average.append(activity.peak_5sec_hr)\n week_30sec_average.append(activity.peak_30sec_hr)\n week_60sec_average.append(activity.peak_60sec_hr)\n if activity.peak_5min_hr:\n week_5min_average.append(activity.peak_5min_hr)\n if activity.peak_10min_hr:\n week_10min_average.append(activity.peak_10min_hr)\n if activity.peak_20min_hr:\n week_20min_average.append(activity.peak_20min_hr)\n if activity.peak_30min_hr:\n week_30min_average.append(activity.peak_30min_hr)\n if activity.peak_60min_hr:\n week_60min_average.append(activity.peak_60min_hr)\n if activity.peak_90min_hr:\n week_90min_average.append(activity.peak_90min_hr)\n if activity.peak_120min_hr:\n week_120min_average.append(activity.peak_120min_hr)\n\n # Final footer.\n _print_footer(\n week_distance_total=week_distance_total,\n week_elevation_total=week_elevation_total,\n week_duration_total=week_duration_total,\n week_work_days=week_work_days,\n week_5sec_average=week_5sec_average,\n week_30sec_average=week_30sec_average,\n week_60sec_average=week_60sec_average,\n week_5min_average=week_5min_average,\n week_10min_average=week_10min_average,\n 
week_20min_average=week_20min_average,\n week_30min_average=week_30min_average,\n week_60min_average=week_60min_average,\n week_90min_average=week_90min_average,\n week_120min_average=week_120min_average,\n )\n\n # Print the summary.\n _print_summary(max)",
"def get_weekly_project_durations(self, week=0):\n\n # get the start and end of the desired week\n now = dt.datetime.now()\n monday = now.date() - dt.timedelta(days=now.weekday() + 7*week)\n nextmonday = monday + dt.timedelta(days=7)\n\n # get all jobs and associated projects for the selected week\n # there will be one row per job and associated project such that a job\n # which is assigned to two projects will also have two rows\n self.alog.dbcur.execute(\n 'WITH ja (id, start, dur, act) AS ('\n ' SELECT jobs.id, jobs.start, jobs.duration, activities.label '\n ' FROM jobs JOIN activities ON jobs.activity = activities.id '\n ' WHERE jobs.start >= ? AND jobs.start < ?) '\n 'SELECT ja.id, ja.start, ja.dur, ja.act, projects.label '\n 'FROM ja LEFT OUTER JOIN job_pj ON ja.id = job_pj.job '\n ' LEFT OUTER JOIN projects ON job_pj.project = projects.id',\n (monday, nextmonday))\n\n jobs = pd.DataFrame(self.alog.dbcur.fetchall(),\n columns=('id', 'start', 'duration', 'act',\n 'project'))\n\n # do the same thing for people, but do not select jobs here that have a\n # project associated with them\n # note that it's not necessary to outer join here, because I have already\n # got all the necessary information about jobs above\n self.alog.dbcur.execute(\n 'SELECT jobs.id, people.label '\n 'FROM jobs JOIN job_p, people '\n ' ON jobs.id = job_p.job AND job_p.person = people.id '\n 'WHERE jobs.start >= ? '\n ' AND jobs.start < ?'\n ' AND jobs.id NOT IN (SELECT job FROM job_pj)',\n (monday, nextmonday))\n\n j_p = pd.DataFrame(self.alog.dbcur.fetchall(),\n columns=('id', 'person'))\n\n # sort the people as projects into the job list\n ids = j_p.id.unique()\n for jid in ids:\n people = j_p[j_p.id == jid].person\n\n row = jobs[jobs.id == jid].copy()\n row.project = people.iloc[0]\n\n # add first person to the corresponding job\n jobs[jobs.id == jid] = row\n\n # if several people are associated with the job, add more rows to the\n # job list\n for person in people.values[1:]:\n row.project = person\n jobs = jobs.append(row, ignore_index=True)\n\n projects = pd.DataFrame(jobs.groupby('project').duration.sum(\n ).sort_values(ascending=False))\n acts = jobs.act.unique()\n\n for act in acts:\n projects[act] = 0\n\n for pj in projects.index:\n actdurs = jobs[jobs.project == pj].groupby('act').duration.sum()\n\n projects.loc[pj, actdurs.index] = actdurs\n\n # remove activities which did not occur in any of the projects\n # (these are project-independent activities)\n projects = projects.T[projects.sum() > 0].T\n\n return projects",
"def generate_report(df, start_date, end_date):\n # Remove any transactions that had to do with collecting or returning security\n security_df = df[(df[CATEGORY] == 'Security') | (df[CATEGORY] == 'Security-Income')]\n df = df[(df[CATEGORY] != 'Security')]\n\n # Exclude the data for everything except our quarter\n period_data = df.loc[start_date:end_date] # Note: NOT using extended quarter range\n rental_income = period_data[period_data[CATEGORY] == 'Rent']\n utilities = period_data[(period_data[CATEGORY] == 'Utilities')]\n other_income = period_data[(period_data['Transaction Type'] == 'credit') & (period_data[CATEGORY] != 'Rent')]\n expenses = period_data[(period_data['Transaction Type'] == 'debit')]\n unpaid_util_overages = float(0)\n\n # print(rental_income)\n # print(other_income)\n # print(expenses)\n \n html_config.initialize()\n print(html_config.HTML_OPEN)\n\n print('<H1>Income and Expense Report for %s-%s:' % (start_date, end_date), '</H1><p>')\n\n # List all unit specific rents and expenses for the quarter\n for UNIT in sorted(rental_income['Unit'].unique()):\n # Show rental income info\n temp_df = rental_income[rental_income['Unit'] == UNIT]\n print('<br><H2>Total rent for Unit ', UNIT, ': ${:,.2f}'.format(temp_df['Amount'].sum()), '</H2>')\n print(temp_df[['Description', 'Amount']].to_html())\n \n if not SKIP_UTIL_ANALYSIS:\n # Show utilities payments and calculate any overage due\n temp_df = utilities[(utilities['Unit'] == UNIT) & (utilities['Transaction Type'] == 'debit')]\n print('<br><H2>Utilities Expenses for Unit', UNIT, ': ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n overage = temp_df.assign(Overage=lambda x: x.Amount - limit_df.loc[UNIT].Amount)\n # Disable warning when setting negative overage values to zero\n pd.set_option('mode.chained_assignment', None)\n overage.Overage[overage.Overage < 0] = 0\n pd.set_option('mode.chained_assignment', 'warn')\n print(overage[['Description', 'Amount', 'Overage']].to_html())\n # Show if utilties costs exceeded allotted amount\n if (overage['Overage'].sum() > 0):\n print('<h3>Utilities overage for unit', UNIT, ': ${:,.2f}'.format(overage['Overage'].sum()), '</h3>')\n unpaid_util_overages += overage['Overage'].sum()\n # Show any untilities that were collected \n overage_collected = utilities[(utilities['Unit'] == UNIT) & (utilities['Transaction Type'] == 'credit')]\n if not overage_collected.empty:\n print('<br><H2>Util overages collected for Unit ', UNIT, ': ${:,.2f}'.format(overage_collected['Amount'].sum()), '</H2>')\n print(overage_collected[['Description', 'Amount', CATEGORY]].to_html())\n print('<h3>Net Utils overage for unit', UNIT, 'for period: ${:,.2f}'.format(overage['Overage'].sum() - overage_collected['Amount'].sum()), '</h3>')\n \n\n \n # Generate unit specific Utility usage reports\n if GEN_TENANT_UTIL_REPORTS and OUTPUT_DIRECTORY:\n TENANT_FILE = '%s/122-Spring-St-%s-%s-Unit-%s-utils.html' % (OUTPUT_DIRECTORY, start_date, end_date, UNIT)\n TENANT_REPORTS.append(TENANT_FILE)\n sys.stdout = open(TENANT_FILE, 'w')\n print(html_config.HTML_OPEN)\n\n print('<H1>Unit', UNIT, '</H1>')\n print('<br><H2>Utilities Expenses for: %s-%s' % (start_date, end_date))\n print('<br>Utilites included in rent: ${:,.2f}'.format(limit_df.loc[UNIT].Amount))\n print('</H2>')\n print(overage[['Description', 'Amount', 'Overage']].to_html())\n # Show if any utilties overage may be due\n if (overage['Overage'].sum() > 0):\n print('<h3>Utilities overage for unit', UNIT, ': ${:,.2f}'.format(overage['Overage'].sum()), '</h3>')\n if not 
overage_collected.empty:\n print('<br><H2>Util overages collected for Unit ', UNIT, ': ${:,.2f}'.format(overage_collected['Amount'].sum()), '</H2>')\n print(overage_collected[['Description', 'Amount', CATEGORY]].to_html())\n print('<h3>Net Utils overage for unit', UNIT, 'for period: ${:,.2f}'.format(overage['Overage'].sum() - overage_collected['Amount'].sum()), '</h3>')\n\n print(html_config.HTML_CLOSE)\n\n # Restore stdout to the main report file\n sys.stdout = open(REPORT_FILE, 'a')\n \n # Show other unit specific transactions\n if SKIP_UTIL_ANALYSIS:\n unit_exp = expenses[(expenses['Unit'] == UNIT)]\n unit_income = other_income[other_income['Unit'] == UNIT]\n else:\n unit_exp = expenses[(expenses['Unit'] == UNIT) & (expenses[CATEGORY] != 'Utilities')]\n unit_income = other_income[(other_income['Unit'] == UNIT) & (other_income[CATEGORY] != 'Utilities')]\n \n if not unit_exp.empty:\n print('<br><H2>Other Unit specific expenses for: ', UNIT, ': ${:,.2f}'.format(unit_exp['Amount'].sum()), '</h2>')\n print(unit_exp[['Description', 'Amount', 'Unit', CATEGORY]].to_html())\n print('<p>')\n \n # Show any other unit specific credit\n other_income = other_income[other_income['Unit'] == UNIT]\n if not other_income.empty:\n print('<br><H2>Expense offsets for Unit ', UNIT, ': ${:,.2f}'.format(other_income['Amount'].sum()), '</H2>')\n print(other_income[['Description', 'Amount', CATEGORY]].to_html())\n \n # Add a line between units\n print('<hr>')\n \n # List the shared income and expenses for the quarter\n temp_df = other_income[other_income['Unit'].isnull()]\n if not temp_df.empty:\n print ('<br><H2>Non unit specific income: ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n print(temp_df[['Description', 'Amount', CATEGORY]].to_html())\n gen_expenses = expenses[expenses['Unit'].isnull()]\n if not gen_expenses.empty:\n print ('<br><H1>Non unit specific expenses</h1>')\n # Get the list of expense categories and generate summary for each\n for category in sorted(gen_expenses[CATEGORY].unique()):\n temp_df = gen_expenses[(gen_expenses[CATEGORY] == category)]\n print ('<br><H2>'+ category +': ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n print(temp_df[['Description', 'Amount', CATEGORY]].to_html())\n \n # If there were any security transactions in the period give a security report\n if not security_df.loc[start_date:end_date].empty:\n temp_df = security_df.loc[start_date:end_date] \n print('<hr><H2>Security related transactions:</H2>')\n print(temp_df[['Description', 'Amount', 'Transaction Type', 'Unit']].to_html())\n for UNIT in sorted(rental_income['Unit'].unique()):\n unit_df = security_df[security_df['Unit'] == UNIT]\n collected = unit_df[(unit_df['Transaction Type'] == 'credit')]['Amount'].sum()\n returned = unit_df[(unit_df['Transaction Type'] == 'debit')]['Amount'].sum()\n print('<center><H4>Current Liability on Unit '+str(UNIT)+': ${:,.2f}'.format(collected-returned), '</H4></center>')\n \n # # Summarize the periods income and expenses -- old way to be discarded...\n # print('<br><H3>Total income for period: ${:,.2f}'.format(rental_income['Amount'].sum()), '</H3>')\n # print('<H3>Total expense for period: ${:,.2f}'.format(expenses['Amount'].sum() - other_income['Amount'].sum()), '</H3>')\n # print('<H3>Profit for period: ${:,.2f}'.format(rental_income['Amount'].sum() + other_income['Amount'].sum() -expenses['Amount'].sum()), '</H3>')\n # Summarize the periods income and expenses\n print('<br><H3>Total income for period: ${:,.2f}'.format(rental_income['Amount'].sum()))\n 
print('<br><H3>Total expense for period: ${:,.2f}'.format(expenses['Amount'].sum() - other_income['Amount'].sum()), '</H3>')\n print('<H3>Profit for period: ${:,.2f}'.format(rental_income['Amount'].sum() + other_income['Amount'].sum() -expenses['Amount'].sum()), '</H3>')\n# print('</H3>')\n \n print(html_config.HTML_CLOSE)\n sys.stdout.flush()",
"def compute_weekends(start, end):\n # Initialize the weekends counter\n weekends = 0\n\n # Do-while loop (to check if the start date falls on a weekend too)\n while True:\n # Check if the day falls on a weekend\n if start.weekday() == 5 or start.weekday() == 6:\n weekends += 1\n\n # The loop checks the days between the start date (inclusive) and\n # the next occurence of the end date's day of the week\n if start.weekday() == end.weekday():\n break\n\n # Increment the start date by one day\n start += datetime.timedelta(days=1)\n\n # Once the start date and the end date fall on the same day of the week,\n # we can just find the number of weeks between them and multiply\n # by two\n weekends += ((end - start).days // 7) * 2\n return weekends",
"def workweeks(yr):\n\n # TODO: MOVE all of this crap into a intelDateTime.py module. Does not belong here. JSS\n\n nyd = datetime.date(yr, 1, 1).weekday() # Determine the day of the week on which the 1st of January fell this year.\n if nyd == 5: return 53 # If the 1st of January fell on a Saturday, the year has 53 weeks.\n if nyd == 4 and isleapyear(yr): return 53 # Same deal if the 1st of January fell on a Friday in a leap year.\n return 52 # All other years have 52 work weeks.",
"def add_weekly_validation_burndown_section(self):\n cursor = self.db_conn.cursor()\n\n # Gets a count of non-test/ghost participants with a participant_summary (e.g, RDR got a primary consent),\n # if the primary consent authored date was on/before the end date for this report\n sql = CONSENTED_PARTICIPANTS_COUNT_SQL + ORIGIN_SQL_FILTER\n cursor.execute(sql.format_map(SafeDict(end_date=self.end_date.strftime(\"%Y-%m-%d\"),\n origin_filter=self.origin_value.lower())))\n consented_count = cursor.fetchone()[0]\n\n # Gets a count of non-test/ghost participants whose consents have been validated\n # (participant has entries in consent_file table), if the consent_file entry was created on/before the\n # end date for this report\n sql = VALIDATED_PARTICIPANTS_COUNT_SQL + ORIGIN_SQL_FILTER\n cursor.execute(sql.format_map(SafeDict(end_date=self.end_date.strftime(\"%Y-%m-%d\"),\n origin_filter=self.origin_value.lower())))\n validated_count = cursor.fetchone()[0]\n\n # Pandas: Gets the number of unique participant_id values from the main (unresolved errors) dataframe\n # that was created at the start of the weekly report generation\n participants_with_errors = self.consent_df['participant_id'].nunique()\n\n participants_no_issues = validated_count - participants_with_errors\n participants_need_validation = consented_count - validated_count\n\n burndown_data = [\n ['DRC CONSENT VALIDATION BURNDOWN'],\n ['',\n 'Total Consented Participants',\n 'Participants With No Consent Issues Detected',\n 'Participants With Unresolved Issues (for 1 or more consent types)',\n 'Participants Not Yet Validated'],\n ['Participant Counts',\n self.format_number(consented_count),\n self.format_number(participants_no_issues),\n self.format_number(participants_with_errors),\n self.format_number(participants_need_validation)\n ]\n ]\n\n start_burndown_row = self.row_pos\n end_burndown_row= start_burndown_row + len(burndown_data)\n burndown_cell_range = self._make_a1_notation(start_burndown_row, end_col=5, end_row=end_burndown_row)\n self._add_report_rows(burndown_cell_range, burndown_data)\n\n # Format the burndown sub-table header and column headers\n self._add_report_formatting(self._make_a1_notation(start_burndown_row, end_col=5),\n self.format_specs.get('burndown_header_row'))\n self._add_report_formatting(self._make_a1_notation(start_burndown_row + 1, end_col=5),\n self.format_specs.get('burndown_column_headers'))\n # Format the burndown sub-table content row (first column is bolded)\n self._add_report_formatting(self._make_a1_notation(end_burndown_row - 1),\n self.format_specs.get('bold_small_wrapped'))\n\n # Inject whitespace after the validation burndown details\n self.row_pos = end_burndown_row + 3",
"def _load_days_lists(self):\n school_year = \\\n SchoolDB.models.get_school_year_for_date(\n self.start_date)\n for i in range(0, self.total_days_count):\n day = self.start_date + timedelta(i)\n if (not school_year or (not school_year.in_block(day))):\n morning_type = afternoon_type = 0\n day_description = \"Not in school year.\"\n elif (i > self.days_count):\n morning_type = afternoon_type = \\\n SchoolDB.models.StudentAttendanceRecord.valid\n day_description = \"In the future.\"\n else:\n morning_type = afternoon_type = \\\n SchoolDB.models.StudentAttendanceRecord.valid\n morning_school_day, afternoon_school_day, day_description = \\\n SchoolDB.models.is_school_day(day,\n self.section)\n if morning_school_day:\n morning_type |= \\\n SchoolDB.models.StudentAttendanceRecord.school_day\n if afternoon_school_day:\n afternoon_type |= \\\n SchoolDB.models.StudentAttendanceRecord.school_day\n self.dayperiod_type.append(morning_type)\n self.dayperiod_type.append(afternoon_type)\n self.day_description.append(day_description)\n self.date_list.append(day.toordinal())",
"def compute_holidays(start, end):\n # The list of holidays and their given dates every year\n holiday_dates = {\n \"new year holiday:\": (1, 1),\n \"labor day holiday:\": (5, 1),\n \"all saints day holiday:\": (11, 1),\n \"christmas holiday:\": (12, 25)\n }\n\n # Initialize the count of occurrences per holiday\n holiday_counts = {holiday: 0 for holiday in holiday_dates.keys()}\n # For loop to go through each holiday\n for holiday in holiday_dates.keys():\n # Sets the year for when counting the occurrences start\n count_start = start.year\n # If the holiday occurs before the start date, we disregard it\n if (start - datetime.date(start.year, *holiday_dates[holiday])).days > 0:\n count_start += 1\n # Sets the year for when counting the occurrences end\n count_end = end.year\n # If the holiday occurs after the end date, we disregard it\n if (datetime.date(end.year, *holiday_dates[holiday]) - end).days > 0:\n count_end -= 1\n # For loop to go through each year in the counting range\n for year in range(count_start, count_end + 1):\n # If the holiday falls on a weekday, we increment the occurrence count\n if datetime.date(year, *holiday_dates[holiday]).weekday() < 5:\n holiday_counts[holiday] += 1\n\n # The total number of holidays is the sum of the counts of each holiday\n holiday_counts[\"total holidays:\"] = sum(holiday_counts.values())\n\n # Returns the dictionary with complete counts\n return holiday_counts",
"def weekly_digest(timeframe_from: int, timeframe_to: int,\n lines: List[List[List[str]]], times: List[List[datetime]],\n colored: bool) -> None:\n num_weeks = ceil((timeframe_from - timeframe_to) / 7)\n weekly_times = [0 for _ in range(num_weeks)]\n dates = [times[0][1]]\n beg_date = times[0][0].date()\n week = 0\n for start, stop in times:\n if ((stop.date() - beg_date).days // 7) != week:\n dates.append(stop)\n week += 1\n weekly_times[week] += (stop - start).seconds\n\n total_seconds = sum(weekly_times)\n average_seconds = total_seconds // num_weeks\n\n if colored:\n print(f'Chosen display: {colors.FG.BRIGHT.RED}WEEKLY DIGEST'\n f'{colors.RESET}\\n')\n else:\n print('Chosen display: WEEKLY DIGEST\\n')\n\n for date, weekly_time in zip(dates, weekly_times):\n end = date + timedelta(days=6)\n start_date = date.strftime(DATE_FORMAT_PATTERN)\n end_date = end.strftime(DATE_FORMAT_PATTERN)\n display_summary(f'{start_date} - {end_date}', weekly_time, colored)\n print()\n\n display_summary('\\nAverage', average_seconds, colored, ' per week')\n display_summary('Total', total_seconds, colored)"
] | [
"0.7783672",
"0.5774272",
"0.56252056",
"0.5575767",
"0.5506491",
"0.5342154",
"0.5337909",
"0.52582586",
"0.52426636",
"0.5213161",
"0.5204552",
"0.5204155",
"0.5193903",
"0.51596296",
"0.51428",
"0.5108332",
"0.510329",
"0.50756997",
"0.504167",
"0.502892",
"0.5019758",
"0.5013292",
"0.5004537",
"0.4982526",
"0.4951662",
"0.49167207",
"0.4903531",
"0.48994958",
"0.48828033",
"0.48785007"
] | 0.7266772 | 1 |
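The four_day_weekends entry above interleaves the weekend enumeration with PTO accounting and report printing, which makes the core date logic hard to see. Below is a minimal sketch of just that enumeration step: it walks Friday–Monday pairs from a start date to the end of the year and drops any pair that touches a holiday. The SAMPLE_HOLIDAYS set and the friday_monday_pairs helper are illustrative assumptions, not part of the original code, and the sketch deliberately leaves out the original's PTO balance, adjacency trimming, and work-from-home handling.

from datetime import date, timedelta

# Illustrative stand-in for the module-level FEDERAL_HOLIDAYS constant used by the
# original code (not shown in this entry); any set of date objects works here.
SAMPLE_HOLIDAYS = {date(2020, 9, 7), date(2020, 11, 27), date(2020, 12, 25)}

def friday_monday_pairs(start: date, year_end: date, holidays: set) -> list:
    """Return (Friday, Monday) pairs between start and year_end with no holiday on either day."""
    pairs = []
    # Advance to the first Friday on or after the start date (Friday has weekday() == 4).
    current = start + timedelta(days=(4 - start.weekday()) % 7)
    while current + timedelta(days=3) <= year_end:
        monday = current + timedelta(days=3)
        if current not in holidays and monday not in holidays:
            pairs.append((current, monday))
        current += timedelta(weeks=1)
    return pairs

if __name__ == "__main__":
    for friday, monday in friday_monday_pairs(date(2020, 8, 1), date(2020, 12, 31), SAMPLE_HOLIDAYS):
        print(friday, "-", monday)

Separating the enumeration from the reporting like this also makes the Friday–Monday pairing straightforward to unit test, which the print-driven original does not allow.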
Computes normalized quantile loss for torch tensors. Uses the qRisk metric as defined in the "Training Procedure" section of the main TFT paper. | def normalized_quantile_loss(actuals: torch.Tensor, predictions: torch.Tensor, quantiles: List[float] = None) -> torch.Tensor:
normalizer = torch.sum(abs(actuals))
    if quantiles is None:
QL = QuantileLoss(quantiles=[0.1, 0.5, 0.9])
else:
QL = QuantileLoss(quantiles=quantiles)
q_loss = QL.loss(y_pred = predictions, target = actuals)
reduced_q_loss = torch.sum(q_loss.reshape(-1, q_loss.shape[-1]), 0)
normalized_loss = 2 * reduced_q_loss / normalizer
return normalized_loss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def numpy_normalised_quantile_loss(self, y_pred, y, quantile):\n if not isinstance(y_pred, paddle.Tensor):\n y_pred = paddle.to_tensor(y_pred,paddle.float32)\n\n if len(y_pred.shape) == 3:\n ix = self.quantiles.index(quantile)\n y_pred = y_pred[..., ix]\n\n if not isinstance(y, paddle.Tensor):\n y = paddle.to_tensor(y,paddle.float32)\n\n prediction_underflow = y - y_pred\n weighted_errors = quantile * paddle.maximum(prediction_underflow, paddle.to_tensor(0.,paddle.float32)) \\\n + (1. - quantile) * paddle.maximum(-prediction_underflow, paddle.to_tensor(0.))\n\n quantile_loss = paddle.mean(weighted_errors)\n normaliser = paddle.abs(y).mean()\n\n return 2 * quantile_loss / normaliser",
"def quantile_loss(pred, true, quantiles, mask=None, weights=None):\n \n assert(len(quantiles)==pred.shape[2])\n \n Q = torch.cat([q*torch.ones_like(true) for q in quantiles], dim=2)\n pinball = Q*F.relu(true - pred) + (1.-Q)*F.relu(pred - true)\n mean_pinball = pinball.mean(axis=[0,1])\n return mean_pinball",
"def transfo_quantile(xx):\n\n # nn = np.zeros(len(xx))\n # oo = np.argsort(xx)\n # nn[oo] = np.arange(len(xx)) / len(xx) + 1 / (2 * len(xx))\n # return nn\n\n return rankdata(xx) / len(xx) - 1 / (2 * len(xx))",
"def to_quantiles(self, y_pred: torch.Tensor) -> torch.Tensor:\n return self.metrics[0].to_quantiles(y_pred)",
"def compute_quantile(risk, T_max: int, scenario_numbers, quantile):\r\n\r\n print(\"\\tComputing Quantile...\")\r\n # Init quantile\r\n q = np.zeros(T_max)\r\n for t in range(T_max):\r\n risk[t].sort()\r\n q[t] = risk[t][int(np.ceil(scenario_numbers[t] * quantile)) - 1]\r\n print(\"\\tDone\")\r\n\r\n return q",
"def to_quantiles(self, y_pred: torch.Tensor) -> torch.Tensor:\n if y_pred.ndim == 2:\n y_pred = y_pred.unsqueeze(-1)\n return y_pred",
"def loss_function(self, x, x_hat_logit, mu, log_sigma):\n rec_loss = nn.functional.binary_cross_entropy_with_logits(x_hat_logit, x, size_average=False)\n kl_loss = -0.5 * torch.sum(1 + log_sigma - mu.pow(2) - log_sigma.exp())\n\n return rec_loss + (kl_loss), rec_loss, kl_loss",
"def _compute_loss(self):\n state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size)\n\n state = torch.FloatTensor(state)\n next_state = torch.FloatTensor(next_state)\n action = torch.LongTensor(action)\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(done)\n\n q_values = self.dqn(state)\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n\n next_q_values = self.target_dqn(next_state)\n next_q_value = next_q_values.max(1)[0]\n target = reward + self.discount_factor * next_q_value * (1 - done)\n\n # loss = F.smooth_l1_loss(q_value, target.detach())\n loss = F.mse_loss(q_value, target.detach())\n\n return loss",
"def __call__(self, y_pred, y_true):\n dy = y_pred - y_true\n n = self.quantiles.size()[0]\n qs = self.quantiles.reshape((n,) + (1,) * max(len(dy.size()) - 2, 0))\n l = torch.where(dy >= 0.0, (1.0 - qs) * dy, (-qs) * dy)\n if self.mask:\n l = torch.where(y_true == self.mask, torch.zeros_like(l), l)\n return l.mean()",
"def quality(X, boost=2):\n X = np.abs(np.sort(X, axis=-1).astype(float))\n Q = 1 - np.log(2 + X[..., -2]) / np.log(2 + X[..., -1])\n Q = (Q * 2).clip(0, 1)\n return Q",
"def default_quantile():\n return np.logspace(-5, 0, 100)",
"def quantile_concrete(self, u):\n y = torch.sigmoid((torch.log(u) - torch.log(1.0 - u) + self.qz_log_alpha) / self.beta)\n return y * (self.zeta - self.gamma) + self.gamma",
"def __call__(self, img: torch.Tensor) -> torch.Tensor:\n img_np: np.ndarray = img.detach().cpu().numpy()\n quantile: float = min(self.num_pixels / img_np.size, 1) \\\n if self.num_pixels is not None else self.quantile\n thresh: float = np.quantile(img_np, 1 - quantile)\n img = (img > thresh).float()\n return img",
"def loss_function(self, x, p_x_given_z_logits, mu, log_sigma):\n rec_loss = nn.functional.binary_cross_entropy_with_logits(p_x_given_z_logits, x, reduction=\"none\").sum(dim=1)\n kl_loss = 0.5 * torch.sum(torch.exp(log_sigma) + mu**2 - log_sigma - 1., dim=1)\n\n return torch.mean(rec_loss + kl_loss), rec_loss.mean(), kl_loss.mean()",
"def computeQuantizationError(origImg: np.ndarray, quantizedImg: np.ndarray) -> int:\n ######################################################################################\n ## TODO: YOUR CODE GOES HERE ##\n ######################################################################################\n\n quantizationError = np.sum(np.square(origImg - quantizedImg))\n\n ######################################################################################\n ## YOUR CODE ENDS HERE ##\n ######################################################################################\n return quantizationError",
"def compute_loss(self, x, gt):\n loss = sum([torch.mean((out - gt)**2) for out in self.forward(x)])\n return loss",
"def quantization_error(self, data):\n self._check_input_len(data)\n return norm(data-self.quantization(data), axis=1).mean()",
"def calc_q_loss(self, batch):\n states = batch['states']\n next_states = batch['next_states']\n\n q_preds = self.qnet(states)\n with torch.no_grad():\n next_q_preds = self.qnet(next_states)\n\n act_q_preds = q_preds.gather(-1, batch['actions'].long().unsqueeze(-1)).squeeze(-1)\n act_next_q_preds = next_q_preds.gather(-1,\n batch['next_actions'].long().unsqueeze(-1)).squeeze(-1)\n\n act_q_targets = batch['rewards'] + self.gamma * (1 - batch['dones']) * act_next_q_preds\n q_loss = nn.functional.mse_loss(act_q_preds, act_q_targets)\n return q_loss",
"def get_percentile(self, q):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")",
"def get_percentile(self, q):\n return None",
"def get_percentile(self, q):\n return None",
"def quantile(self, q: float) -> np.array:\n assert 0 <= q <= 1\n return np.array(\n list(\n chain.from_iterable(\n model.predict(self.featurized_data, q)\n for model in self.models\n )\n )\n )",
"def kernel_quantile_heuristic(X, q=0.5):\n pairwise_sq_dists = pdist(X, 'sqeuclidean')\n quantile_heuristic_s2 = np.quantile(pairwise_sq_dists, q=q)\n return quantile_heuristic_s2",
"def test_valid_calculation_of_quantile(alpha: Any) -> None:\n n = 30\n check_alpha_and_n_samples(alpha, n)",
"def posterior_loss(X, mu, sigma, log_pi):\r\n log_PDF = log_GaussPDF(X, mu, sigma)\r\n log_post = log_posterior(log_PDF, log_pi)\r\n\r\n loss = torch.logsumexp(log_post, dim=1)\r\n # loss = torch.exp(log_post)\r\n # loss = torch.sum(loss, dim=1)\r\n # loss = torch.log(loss)\r\n loss = torch.sum(loss)\r\n loss = -loss\r\n return loss",
"def advantage(self, state, Q: torch.Tensor = None):\n return Q - Q.max()\n # return Q - torch.matmul(self.π.pmf(state, action_values=Q), Q)",
"def top_quantile_positive_constraint(x: Tensor, q: float, bias: float = 0.1) -> Tensor:\n\n if x.dim() != 2:\n raise ValueError(\n f'operation only supported for 2D tensors (got {x.dim()}D).'\n )\n\n if not (0. <= q <= 1.):\n raise ValueError(\n f'`q` must be in range [0, 1] but is {q}.'\n )\n\n if q == 0.:\n return torch.relu(-x.max(dim=1)[0]).mean() + bias\n if q == 1.:\n return positive_constraint(x, bias=bias)\n else:\n k = int(q * x.size(1))\n return torch.relu(-x.topk(k, dim=1)[0]).mean() + bias",
"def _build_target_quantile_values_op(self):\n batch_size = tf.shape(self._replay.rewards)[0]\n ###### Munchausen-specific\n replay_action_one_hot = tf.one_hot(\n self._replay.actions, self.num_actions, 1., 0., name='action_one_hot')\n # tau * ln pi_k+1 (s')\n replay_next_log_policy = utils.stable_scaled_log_softmax(\n self._replay_next_target_q_values, self.tau, axis=1)\n # tau * ln pi_k+1(s)\n replay_log_policy = utils.stable_scaled_log_softmax(\n self._replay_target_q_values, self.tau, axis=1)\n replay_next_policy = utils.stable_softmax( # pi_k+1(s')\n self._replay_next_target_q_values, self.tau, axis=1)\n\n tau_log_pi_a = tf.reduce_sum( # ln pi_k+1(a|s)\n replay_log_policy * replay_action_one_hot, axis=1)\n\n tau_log_pi_a = tf.clip_by_value(\n tau_log_pi_a, clip_value_min=self.clip_value_min, clip_value_max=0)\n\n munchuasen_term = self.alpha * tau_log_pi_a\n #########\n\n # Shape of rewards: (num_tau_prime_samples x batch_size) x 1.\n rewards = self._replay.rewards[:, None] + munchuasen_term[Ellipsis, None]\n rewards = tf.tile(rewards, [self.num_tau_prime_samples, 1])\n\n is_terminal_multiplier = 1. - tf.cast(self._replay.terminals, tf.float32)\n # Incorporate terminal state to discount factor.\n # size of gamma_with_terminal: (num_tau_prime_samples x batch_size) x 1.\n gamma_with_terminal = self.cumulative_gamma * is_terminal_multiplier\n gamma_with_terminal = tf.tile(gamma_with_terminal[:, None],\n [self.num_tau_prime_samples, 1])\n\n # shape: (batch_size * num_tau_prime_samples) x num_actions\n replay_next_policy_ = tf.tile(replay_next_policy,\n [self.num_tau_prime_samples, 1])\n replay_next_log_policy_ = tf.tile(replay_next_log_policy,\n [self.num_tau_prime_samples, 1])\n\n # shape: (batch_size * num_tau_prime_samples) x 1\n replay_quantile_values = tf.reshape(\n self._replay_net_target_quantile_values,\n [batch_size * self.num_tau_prime_samples, self.num_actions])\n\n # shape: (batch_size * num_tau_prime_samples) x num_actions\n weighted_logits = (\n replay_next_policy_ * (replay_quantile_values\n - replay_next_log_policy_))\n\n # shape: (batch_size * num_tau_prime_samples) x 1\n target_quantile_values = tf.reduce_sum(weighted_logits, axis=1,\n keepdims=True)\n\n return rewards + gamma_with_terminal * target_quantile_values",
"def _quantile_normalization(arr, mode=\"mean\"):\n n = len(arr)\n perc = percentileofscore\n arr_ = arr.copy()[~np.isnan(arr)]\n out = np.zeros(n)\n for i in range(n):\n if not np.isnan(arr[i]):\n out[i] = norm.ppf(perc(arr_, arr[i], mode) / 100.)\n else:\n out[i] = np.nan\n return out",
"def loss_scale(self):\n return self._loss_scale"
] | [
"0.70270723",
"0.6815313",
"0.58613586",
"0.58225995",
"0.56911457",
"0.5627937",
"0.5546004",
"0.5536235",
"0.54864985",
"0.54816455",
"0.547545",
"0.54679716",
"0.546322",
"0.54552704",
"0.53829426",
"0.5378242",
"0.53772986",
"0.53498733",
"0.5348454",
"0.534481",
"0.534481",
"0.5344237",
"0.5332005",
"0.5245817",
"0.5240364",
"0.523602",
"0.5234447",
"0.52313656",
"0.5220603",
"0.5189319"
] | 0.7660437 | 0 |
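For reference, the normalized quantile loss computed by the normalized_quantile_loss entry above matches (up to notation) the q-Risk metric from the TFT paper's "Training Procedure" section: sum the pinball losses over all targets and forecast horizons for a given quantile, double the result, and divide by the sum of absolute target values.

\[
QL(y, \hat{y}, q) = q\,(y - \hat{y})_{+} + (1 - q)\,(\hat{y} - y)_{+}
\]
\[
\text{q-Risk}(q) = \frac{2 \sum_{y_t \in \tilde{\Omega}} \sum_{\tau} QL\big(y_t, \hat{y}(q, t-\tau, \tau), q\big)}{\sum_{y_t \in \tilde{\Omega}} \sum_{\tau} \lvert y_t \rvert}
\]

In the code above, QL.loss supplies the per-element pinball terms, torch.sum over the flattened output gives the per-quantile numerator, and normalizer = torch.sum(abs(actuals)) gives the denominator.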
Sets a login cookie if the user exists and the password check passes. | def set_cookie():
decoded_request = json.loads(request.data)
email = decoded_request['email']
password = decoded_request['password']
incorrect_pw_msg = 'Oh no, this does not look like its correct. Please try again.'
user = crud.get_user_by_email(email)
correct_password = crud.is_correct_password(email, password)
if correct_password:
session["user"] = user.user_id
response = make_response({}, 200)
response.set_cookie("logged-in", "true")
return response
else:
response = jsonify({"error": incorrect_pw_msg}), status.HTTP_400_BAD_REQUEST
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_user_cookie_id():\n #new fresh user\n if not request.cookies.get(config.COOKIE_ADSABS2_NAME):\n if current_user.is_anonymous():\n g.user_cookie_id = unicode(uuid.uuid4())\n else:\n g.user_cookie_id = current_user.get_id()\n #the user has already visited the web site\n else:\n if current_user.is_anonymous():\n #if the cookie is a valid UUID it's ok\n curr_cookie = request.cookies.get(config.COOKIE_ADSABS2_NAME)\n try:\n uuid.UUID(curr_cookie)\n g.user_cookie_id = curr_cookie\n #otherwise the app generates a new one\n except ValueError:\n g.user_cookie_id = unicode(uuid.uuid4())\n else:\n g.user_cookie_id = current_user.get_id()",
"def post(self):\n # original code\n self.set_secure_cookie(\"user\", self.get_argument(\"name\"))\n self.redirect(\"/\")",
"def set_cookie( name, value, **kwargs ) :",
"def login(self, user):\n self.set_secure_cookie('user_id', str(user.key().id()))",
"def login(self, user):\n self.set_secure_cookie('user_id', str(user.key().id()))",
"def test_set_user_info_cookie(self):\n cookie_value = '%s:True:%s' % (EMAIL, USER_ID)\n expected_result = '%s=\"%s\"; Path=/' % (COOKIE_NAME, cookie_value)\n\n result = login._set_user_info_cookie(EMAIL, True, cookie_name=COOKIE_NAME)\n\n self.assertEqual(expected_result, result)",
"def set_cookie(self, response):\n if self._invalidated:\n response.delete_cookie(\n key=self.app.conf.flask.session_cookie_name,\n path=self.app.conf.flask.session_cookie_path,\n domain=self.app.conf.flask.session_cookie_domain,\n )\n return\n response.set_cookie(\n key=self.app.conf.flask.session_cookie_name,\n value=self.meta.cookie_val,\n domain=self.app.conf.flask.session_cookie_domain,\n path=self.app.conf.flask.session_cookie_path,\n secure=self.app.conf.flask.session_cookie_secure,\n httponly=self.app.conf.flask.session_cookie_httponly,\n samesite=self.app.conf.flask.session_cookie_samesite,\n max_age=self.app.conf.flask.permanent_session_lifetime,\n )",
"def saveCookie(self, resp):\n #save Cookie\n if resp.has_key('set-cookie'):\n self.updateHeaders('Cookie', resp['set-cookie'])\n print '--', 'Save cookie : ', resp['set-cookie']",
"def get(self):\n self.response.headers.add_header(\"Set-Cookie\", \"user_id=; Path=/\")\n self.redirect(\"/signup\")",
"def setup_user():\n if 'auth_user' in flask.session:\n user = models.User.query.get(flask.session['auth_user'])\n if user is None:\n # old bad cookie, no good\n del flask.session['auth_user']\n # save the user in `flask.g`, which is a set of globals for this request\n flask.g.user = user",
"def update_session(user):\n\n # Setup/update cookie\n user.cookie = token_urlsafe(64)\n user.cookie_expiration = datetime.now() + timedelta(hours=2)\n\n # Commit\n db.session.add(user)\n db.session.commit()\n\n cookie = user.cookie\n return cookie",
"def set_logged_in_cookies(request, response, user):\n # Note: The user may not yet be set on the request object by this time,\n # especially during third party authentication. So use the user object\n # that is passed in when needed.\n\n if user.is_authenticated and not user.is_anonymous:\n\n # JWT cookies expire at the same time as other login-related cookies\n # so that cookie-based login determination remains consistent.\n cookie_settings = standard_cookie_settings(request)\n\n _set_deprecated_logged_in_cookie(response, cookie_settings)\n _set_deprecated_user_info_cookie(response, request, user, cookie_settings)\n _create_and_set_jwt_cookies(response, request, cookie_settings, user=user)\n CREATE_LOGON_COOKIE.send(sender=None, user=user, response=response)\n\n return response",
"def set_cookie( cookies, name, morsel, **kwargs ) :",
"def add_cookies_enabled_test(response, cookies):\n \n # If authenticaten is required don't do anything\n if AUTHENTICATION_REQUIRED:\n return \n if not cookies.get(COOKIES_ENABLED_KEY, None):\n response.set_cookie(COOKIES_ENABLED_KEY, 'True', \n max_age=COOKIES_ENABLED_MAX_AGE,\n domain=COOKIE_DOMAIN)",
"def test_custom_cookie_used(self):\n factory = self._make_factory_custom()\n request = self._make_request()\n\n session = factory(request)\n session[\"a\"] = 1 # we only create a cookie on edit\n\n response = webob.Response()\n request.response_callbacks[0](request, response)\n hdrs_sc = response.headers.getall(\"Set-Cookie\")\n self.assertEqual(len(hdrs_sc), 1)\n self.assertEqual(response.vary, (\"Cookie\",))\n\n assert session.session_id in hdrs_sc[0]\n raw_sessionid_cookie = \"session=%s; Path=/; HttpOnly\" % session.session_id\n assert raw_sessionid_cookie in hdrs_sc",
"def setCookieFile(self, cookie):\n if os.path.isfile(cookie):\n jc = jsoncookie.jsoncookie()\n jc.open(cookie)\n self.cookiejar = jc.cookiejar(self.server)\n jc.close()",
"def __cookieAdded(self, cookie):\n if self.__rejectCookie(cookie, cookie.domain()):\n self.__store.deleteCookie(cookie)\n return\n \n self.insertCookie(cookie)\n self.cookiesChanged.emit()",
"def on_before(self, controller):\n session_id = controller.get_cookie(self.session_config.cookie_id)\n cookie_id = str(self.session_config.auth_cookie)\n userid = controller.get_secure_cookie(cookie_id)\n user = None\n if userid:\n sname = self.session_config.auth_service\n logger.debug(self.application.models)\n auth_service = self.application.models[sname]\n user = auth_service.auth(userid)\n if user:\n if not session_id:\n session_id = self.gen_session_id(controller)\n setattr(user, 'just_signin', True)\n setattr(user, 'session_id', session_id)\n threadlocal.set_user(user)\n \n if not session_id:\n session_id = self.gen_session_id(controller)\n threadlocal.set_sessionid(session_id)\n threadlocal.set_ip(controller.request.remote_ip)\n if session_id:\n controller.set_cookie(self.session_config.cookie_id, session_id)\n\n if not user and controller.require_auth:\n h = controller.request.headers.get('X-Requested-With', None)\n if h and h == 'XMLHttpRequest':\n raise tornado.web.HTTPError(403, self.__class__.__name__)\n else:\n if controller.request.method in (\"GET\", \"HEAD\"):\n url = controller.get_login_url()\n if \"?\" not in url:\n if urlparse.urlsplit(url).scheme:\n # if login url is absolute, make next absolute too\n next_url = controller.request.full_url()\n else:\n next_url = controller.request.uri\n url += \"?\" + urllib.urlencode(dict(next=next_url))\n controller.redirect(url)\n else:\n raise tornado.web.HTTPError(403, self.__class__.__name__)",
"def create_or_update(cls, session, username, password, cookies):\n cookie = session.query(Cookie). \\\n filter(Cookie.username == username).one_or_none()\n\n password = encode_passwd(password)\n\n if cookie:\n cookie.password = password\n cookie.cookie = cookies\n cookie.updated_at = text('NOW()')\n else:\n cookie = Cookie(username=username, password=password, cookie=cookies)\n session.add(cookie)",
"def _update_cookie(self, encoded_data, response):\n if encoded_data:\n response.set_cookie(\n self.cookie_name,\n encoded_data,\n domain=settings.SESSION_COOKIE_DOMAIN,\n secure=settings.SESSION_COOKIE_SECURE or None,\n httponly=settings.SESSION_COOKIE_HTTPONLY or None,\n samesite=settings.SESSION_COOKIE_SAMESITE,\n )\n else:\n response.delete_cookie(\n self.cookie_name,\n domain=settings.SESSION_COOKIE_DOMAIN,\n samesite=settings.SESSION_COOKIE_SAMESITE,\n )",
"def set_logged_in_user(self, user={}, check_session=False):\n\n try:\n if type(self.cache) is Cache:\n if check_session:\n sessionId = self.get_session_id_from_cookie()\n if sessionId:\n self.sessionId = sessionId\n self.cache.set('user.sessionId', sessionId)\n else:\n raise Exception('Could not login. Please try again.')\n\n if 'id' in user:\n self.cache.set('user.id', user['id'])\n if 'username' in user:\n self.cache.set('user.username', user['username'])\n if 'profileUrl' in user:\n self.cache.set('user.profileUrl', user['profileUrl'])\n if 'id' not in user:\n user_id = self._parse_user_id_from_url(user['profileUrl'])\n if user_id:\n self.userId = user_id\n self.cache.set('user.id', user_id)\n if 'avatarUrl' in user:\n self.cache.set('user.avatarUrl', user['avatarUrl'])\n if 'reputation' in user:\n self.cache.set('user.reputation', user['reputation'])\n if 'badge1' in user:\n self.cache.set('user.badge1', user['badge1'])\n if 'badge2' in user:\n self.cache.set('user.badge2', user['badge2'])\n if 'badge3' in user:\n self.cache.set('user.badge3', user['badge3'])\n \n return True\n else:\n raise Exception('Could not save data.')\n except:\n Utils.log(traceback.format_exc())\n utils.error('Could not login. Please try again.')",
"def login(self, user, remember=False):\n\n self.set_secure_cookie('user_id', str(user.key().id()), remember)",
"def test_default_cookie_used(self):\n factory = self._make_factory_default()\n request = self._make_request()\n\n session = factory(request)\n session[\"a\"] = 1 # we only create a cookie on edit\n\n response = webob.Response()\n request.response_callbacks[0](request, response)\n hdrs_sc = response.headers.getall(\"Set-Cookie\")\n self.assertEqual(len(hdrs_sc), 1)\n self.assertEqual(response.vary, (\"Cookie\",))\n\n assert session.session_id not in hdrs_sc[0]\n raw_sessionid_cookie = \"session=%s; Path=/; HttpOnly\" % session.session_id\n assert raw_sessionid_cookie not in hdrs_sc",
"def syncrepl_set_cookie(self, cookie):\n pass",
"def login(self, cookie: str, info: Dict[str, Any]):\n if cookie is None:\n self.logout()\n else:\n if self.username is None:\n raise ValueError(\n 'Cannot login without a username (hint: use \"change_user\" instead of \"login\")'\n )\n self.change_user(self.username, cookie, info)",
"def set_secure_cookie( name, value, **kwargs ):",
"def set_cookie(self, cookie):\n c = self._cookies\n if cookie.domain not in c:\n c[cookie.domain] = {}\n c2 = c[cookie.domain]\n if cookie.path not in c2:\n c2[cookie.path] = {}\n c3 = c2[cookie.path]\n c3[cookie.name] = cookie",
"def store_userid(request_handler, userid):\n session = sessions.LilCookies(request_handler, SESSION_SECRET)\n session.set_secure_cookie(name='userid', value=userid)",
"def setFlashCookie(self, flashCookie):\n pass",
"def cookie_login(self, username):\n self.driver.get(self.url)\n with open(self._cookie_file(username), \"r\") as f:\n cookies = json.load(f)\n for cookie in cookies:\n self.driver.add_cookie(cookie)\n\n self.driver.refresh()"
] | [
"0.7051497",
"0.6615434",
"0.63359445",
"0.63336045",
"0.63336045",
"0.6249339",
"0.6206051",
"0.6161316",
"0.61472225",
"0.61126524",
"0.60267013",
"0.6026603",
"0.59424293",
"0.59401476",
"0.5921613",
"0.5901393",
"0.5874812",
"0.58216405",
"0.5815492",
"0.58044195",
"0.5800076",
"0.57735544",
"0.5734247",
"0.5722373",
"0.56716853",
"0.56701434",
"0.5668483",
"0.56633115",
"0.56615853",
"0.56600183"
] | 0.68235534 | 1 |
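The retrieved `set_cookie` document above depends on an app-specific `crud` module and `status` constants, so it will not run on its own. A minimal self-contained Flask sketch of the same pattern — verify credentials, store the user in the session, set a `logged-in` cookie on success, return 400 otherwise — follows; the `/api/login` route and the `check_credentials` helper are hypothetical stand-ins, not part of the original app.

import json
from flask import Flask, request, session, jsonify, make_response

app = Flask(__name__)
app.secret_key = "change-me"  # Flask sessions require a secret key

def check_credentials(email, password):
    # Hypothetical stand-in for crud.get_user_by_email / crud.is_correct_password
    return {"user_id": 1} if (email, password) == ("a@b.c", "secret") else None

@app.route("/api/login", methods=["POST"])
def set_cookie():
    data = json.loads(request.data)
    user = check_credentials(data["email"], data["password"])
    if user is None:
        return jsonify({"error": "Incorrect email or password."}), 400
    session["user"] = user["user_id"]          # server-side login state
    response = make_response({}, 200)
    response.set_cookie("logged-in", "true")   # client-readable login indicator
    return response

Note the split of responsibilities in this pattern: the signed session cookie carries the authoritative login state, while the plain `logged-in` cookie only lets frontend code cheaply decide whether to render a logged-in UI.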