Dataset columns:
  query: stringlengths 9 to 9.05k
  document: stringlengths 10 to 222k
  metadata: dict
  negatives: listlengths 30 to 30
  negative_scores: listlengths 30 to 30
  document_score: stringlengths 4 to 10
  document_rank: stringclasses (2 values)
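The metadata field repeated in every row below is plain JSON describing the training objective. As a quick sanity check (a minimal sketch, assuming only the Python standard library), it parses cleanly and its triplet objective names the query, document, and negatives columns:

import json

metadata = '{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }'
obj = json.loads(metadata)["objective"]
assert obj["self"] == [] and obj["paired"] == []
assert obj["triplet"] == [["query", "document", "negatives"]]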
Return True if the given element is in this view.
def is_element_in_view(self, element: Element) -> bool:
    return self.find_element_view(element=element) is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contains(self, element) -> bool:\n\n return self.__find_node(element) is not None", "def __contains__(self, item):\n\n if self.is_view:\n return item in self._view\n return item in self._storage", "def contains(self, element):\n pass", "def is_element_visible(self):\n if self.web_element.is_displayed():\n return True\n else:\n return False", "def __contains__(self, elem):\n return elem in list(self)", "def isElement(self, elementXpath):\r\n try:\r\n self.browser.find_element_by_xpath(elementXpath)\r\n return True\r\n except:\r\n return False", "def _is_element_present():\r\n return self.q(css=element_selector).present", "def is_viewed(self):\n return self.has_label(VIEWED_LABEL)", "def check_element(self, e):\n my_view = {}\n if self.content_mimetype is not None:\n my_view[\"mimetype\"] = self.content_mimetype\n if self.content_model is not None:\n my_view[\"model\"] = self.content_model\n\n if self.element_constraint is not None:\n ret = self.element_constraint.apply_to(e)\n else:\n ret = True\n return ret & apply_to(my_view, e)", "def check_parent_and_children_not_in_view(self, element: Element) -> None:\n for view in self.element_views:\n if view.element in element.child_elements:\n raise ValueError(f\"A child of {element.name} is already in this view.\")\n if view.element is getattr(element, \"parent\", None):\n raise ValueError(\n f\"The parent of {element.name} is already in this view.\"\n )", "def is_in(self, entry):\n return entry in self.__entries", "def isContainedIn(self, t):\n if self.parent is None:\n return False\n if self.parent.getClassName() == t:\n return True\n return self.parent.isContainedIn(t)", "def isin(self, item):\n return self.get(item) is not None", "def IsVisibleInView(object_id, view=None):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n viewport = __viewhelper(view).MainViewport\n bbox = rhobj.Geometry.GetBoundingBox(True)\n return rhobj.Visible and viewport.IsVisible(bbox)", "def is_element_display(self, selector):\n return True if self.get_element(selector).is_displayed() else False", "def __contains__(self, item):\n if item in self._parents:\n return True\n else:\n return False", "def __contains__(self, pos):\n if pos in self._coordinates:\n return True\n return False", "def is_tagged(self,tag_name,element):\n return (tag_name in self.tag2elements.keys()) and (element in self.tag2elements[tag_name])", "def elementIsVisible(self, element_tuple):\n result = self.CORE.find_element(*self.format_element(element_tuple)).is_displayed()\n self.log_info(f\"Browser.elementIsVisible: {element_tuple} is {'' if result else 'not '}present\")\n return result", "def __contains__(self, item):\n try:\n pos = Vec2(*item)\n return pos.x >= self.origin.x and pos.y >= self.origin.y \\\n and pos.x < self.origin.x + self.size.x \\\n and pos.y < self.origin.y + self.size.y\n except TypeError:\n return False", "def __contains__(self, fragment):\n return fragment in self._items", "def __contains__(self, item):\n return item in self.contents", "def _contains(self, element):\n if not isinstance(element, Tuple) or len(element) != 2:\n return S.false\n\n if not element[1].is_Integer:\n return S.false\n\n if element[1] >= len(self.sets) or element[1] < 0:\n return S.false\n\n return self.sets[element[1]]._contains(element[0])", "def has_node(self, val):\n return val in self", "def has_node(self, val):\n return val in self", "def __contains__(self, item: Any) -> bool:\n return item in self.item_to_index", "def __contains__(self, item):\r\n current = self\r\n while 
current is not None:\r\n if item in current.locals:\r\n return True\r\n current = current.parent\r\n return False", "def contains(self, x: object):\n return x in self.items", "def isElement(self):\n return _libsbml.XMLToken_isElement(self)", "def __contains__(self, x):\n return x in (v for v, _ in self)" ]
[ "0.72909355", "0.6978362", "0.6958688", "0.67255217", "0.6717766", "0.6496774", "0.64491296", "0.64290804", "0.6372613", "0.6279521", "0.6277805", "0.62600285", "0.6246147", "0.6245345", "0.6212836", "0.6174918", "0.6135724", "0.61295", "0.61222446", "0.61159575", "0.6111", "0.6101821", "0.60952115", "0.6083216", "0.6083216", "0.6082135", "0.6081493", "0.60742146", "0.6030743", "0.6003118" ]
0.8923193
0
Find a child element view matching a given element.
def find_element_view(
    self,
    *,
    element: Optional[Element] = None,
) -> Optional[ElementView]:
    return next(
        (view for view in self.element_views if view.element.id == element.id),
        None,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getChildView(self, parentId, childSeq):\n # child_view = None\n # str_getChildView = \"self.vc.findViewById('\" + parentId + \"')\"\n # for index in childSeq:\n # str_getChildView += ('.children[' + str(index) + ']')\n # printLog(self.threadName + \"executing child_view=%s\" % str_getChildView)\n # exec 'child_view=' + str_getChildView\n # return child_view\n pv = self.vc.findViewById(parentId)\n if not pv:\n # printLog(self.threadName + '[__getChildView] could not find parent view %s' % parentId, logging.DEBUG)\n return None\n for index in childSeq:\n if DEBUG:\n printLog(self.threadName + '[__getChildView] searching child view: %s[%s]' % (pv.getId(), index),\n logging.DEBUG)\n cv = pv.children[int(index)]\n if cv:\n if DEBUG:\n printLog(self.threadName + '[__getChildView] found child view: %s' % cv.getId(), logging.DEBUG)\n pv = cv\n else:\n # printLog(self.threadName + '[__getChildView] could not find child of %s' % pv.getId(), logging.DEBUG)\n return None\n return pv", "def get_child_by(self, selector):\r\n for child in self.get_children():\r\n if selector(child):\r\n return child\r\n return None", "def __getView(self, raw_view_id):\n if iDevice.dump_view:\n self.__dumpview()\n id_RE = re.compile(\"^(id/\\D+)\\((\\S+)\\)$\")\n if DEBUG:\n printLog(self.threadName + \"[__getView] raw view id:%s\" % raw_view_id)\n if id_RE.match(raw_view_id):\n # search the child by sequence path\n viewId, seq_string = id_RE.search(raw_view_id).groups()\n if DEBUG:\n printLog(self.threadName + \"[__getView] view id:%s, seq:%s\" % (viewId, seq_string))\n seqs = seq_string.split(',')\n tv = self.__getChildView(viewId, seqs)\n else:\n # search with the given id directly\n if DEBUG:\n printLog(self.threadName + \"finding view by id %s ...\" % raw_view_id, logging.DEBUG)\n tv = self.vc.findViewById(raw_view_id)\n # if tv:\n # printLog('Found view %s.' % raw_view_id, logging.DEBUG)\n # self.resultFlag = True\n # else:\n # printLog('Target view %s not found.' 
% raw_view_id, logging.ERROR)\n # self.resultFlag = False\n\n return tv", "def get_by_element(self, element):\n token_ct = ContentType.objects.get_for_model(element)\n try:\n return self.get(\n elements__content_type=token_ct,\n elements__object_id=element.pk,\n )\n except ObjectDoesNotExist:\n return None", "def find_element(self, element: WebElement) -> WebElement:\n return element", "def is_element_in_view(self, element: Element) -> bool:\n return self.find_element_view(element=element) is not None", "def find_element(self, locator, parent=None):\n return self._element_finder.find(locator, parent=parent)", "def _find_in_xml(self, pattern, element=None, namespace=Xmlns_path):\n el = self._xml if element is None else element\n return el.find('.//' + namespace + pattern)", "def search(self, item):\n if self._element == item:\n return self\n if self._leftchild:\n node = self._leftchild.search(item)\n if node != None:\n return node\n if self._rightchild:\n node = self._rightchild.search(item)\n if node != None:\n return node\n return None", "def locateChild(ctx, segments):", "def get_child(self, uid: str):\n if not self.has_child(uid):\n raise RuntimeError(\"Widget '{}' doesn't contain child '{}'.\".format(self.uid, uid))\n\n for w in self._children:\n if w.uid == uid:\n return w", "def get_view(self):\n for w in self.child_widgets():\n return w", "def find(self, uid):\n return self._root.find(uid)", "def activeView(self):\r\n subWin = self.parentWidget().activeSubWindow()\r\n \r\n if subWin:\r\n for child in subWin.children(): \r\n if 'view' in child.objectName(): # Grab the View from the active tab/sub-window\r\n return child", "def FindElement(self, element):\r\n for eachElement in self.__listOfElements:\r\n if eachElement == element:\r\n return eachElement\r\n raise RepositoryError(\"Inexisting Element\")", "def __find(self, x, parent: 'Node'):\n found = None\n if parent.value == x:\n return parent\n\n for child in parent.children:\n if child.value == x:\n return child\n new_found = self.__find(x, parent=child)\n if new_found:\n found = new_found\n\n return found", "def element(self):\n if self._root is None:\n return WebDriverWait(self._browser, TMO).until(\n lambda browser: browser.find_element(*self._locator))\n else:\n return WebDriverWait(self._browser, TMO).until(\n lambda browser: self._root.find_element(*self._locator))", "def find_element_inside_element(self, parent_element: Union[WebElement, Tuple[By, str]],\n child_element_locator: Tuple[By, str], wait_time=10,\n skip_exception=False) -> Union[WebElement, None]:\n parent_element = self.find_element(parent_element)\n for i in range(wait_time):\n by_type, value = child_element_locator\n if by_type == By.CSS_SELECTOR:\n child = parent_element.find_element_by_css_selector(value)\n elif by_type == By.XPATH:\n child = parent_element.find_element_by_xpath(value)\n else:\n child = parent_element.find_element(child_element_locator)\n if child:\n return child\n time.sleep(1)\n else:\n if not skip_exception:\n raise TimeoutException(f'Element was not found in {wait_time} seconds')\n return None", "def check_parent_and_children_not_in_view(self, element: Element) -> None:\n for view in self.element_views:\n if view.element in element.child_elements:\n raise ValueError(f\"A child of {element.name} is already in this view.\")\n if view.element is getattr(element, \"parent\", None):\n raise ValueError(\n f\"The parent of {element.name} is already in this view.\"\n )", "def _add_element(self, element: Element, add_relationships: bool) -> 
ElementView:\n if element not in self.model:\n raise RuntimeError(\n f\"The element {element} does not exist in the model associated with \"\n f\"this view.\"\n )\n view = self.find_element_view(element=element)\n if view is None:\n view = ElementView(element=element)\n self.element_views.add(view)\n if add_relationships:\n self._add_relationships(element)\n return view", "def get_element( self, element_name, base_element = None ):\n if base_element is not None:\n if not etree.iselement( base_element ):\n return None\n else:\n base_element = self.xml_root\n element = base_element.find( element_name )\n if element == 'None':\n return None\n return element", "def findChild(self, name):\n\n # Note - this returns the first child of the given name\n # any other children with similar names down the tree\n # is not considered.\n \n for child in self.getAllChildren():\n if child.getName() == name:\n return child", "def get_child(self, child_index):\n try:\n return self.children[child_indexndex] #Return the child at the provided index\n except: #If the index is invalid,\n return None #Returns None", "def find_child_element(xml, tag, name, attrib_value=None):\r\n if attrib_value is None:\r\n for element in xml.getchildren():\r\n if element.tag == tag and name in element.attrib:\r\n return element\r\n else:\r\n for element in xml.getchildren():\r\n if element.tag == tag and name in element.attrib and\\\r\n element.attrib[name] == attrib_value:\r\n return element", "def find(self):\n if self.get_parent() == self:\n return self\n else:\n self.__parent = self.__parent.find()\n return self.__parent.find()", "def get_child(self, character):\n if self.has_child(character):\n index = self._get_index(character.upper())\n return self.children[index]\n else:\n raise ValueError(f'No child exists for character {character!r}')", "def _remove_element(self, element: Element) -> None:\n if element not in self.model:\n raise RuntimeError(\n f\"The element {element} does not exist in the model associated with \"\n f\"this view.\"\n )\n self.element_views.add(ElementView(id=element.id))\n for element_view in list(self.element_views): # Copy as modifying as we go\n if element_view.id == element.id:\n self.element_views.remove(element_view)\n\n for relationship_view in list(self._relationship_views):\n if (\n relationship_view.relationship.source.id == element.id\n or relationship_view.relationship.destination.id == element.id\n ):\n self._relationship_views.remove(relationship_view)", "def getChild(self, name):\n \n for child in self._children:\n if child.getName() == name:\n return child", "def get_element_by_element_id(self, element_id):\n for element in self.iterate():\n if element.get_id() == element_id:\n return element", "def _find_element(self, driver):\n WebDriverWait(driver, self.timeout).until(EC.visibility_of_element_located(self.locator))\n element = driver.find_element(*self.locator)\n page_logger.debug('Element found: %s' % self.locator[1])\n return element" ]
[ "0.62151027", "0.611608", "0.58677596", "0.5785795", "0.573255", "0.56479764", "0.5616665", "0.55585456", "0.5556", "0.5547902", "0.55382514", "0.54818493", "0.54810154", "0.5471856", "0.5421521", "0.53672916", "0.5366021", "0.52839684", "0.5273117", "0.5253067", "0.52432305", "0.5230228", "0.5217506", "0.5193097", "0.5186378", "0.517545", "0.5166384", "0.51486844", "0.51396227", "0.51134145" ]
0.7645557
0
Find a child relationship view matching the supplied non-None arguments.
def find_relationship_view(
    self,
    *,
    relationship: Optional[Relationship] = None,
    description: Optional[str] = None,
    response: Optional[bool] = None,
) -> Optional[RelationshipView]:
    for view in self._relationship_views:
        rel = view.relationship
        if (
            (relationship is None or rel.id == relationship.id)
            and (
                description is None
                or view.description == description
                or (view.description is None and rel.description == description)
            )
            and (response is None or view.response == response)
        ):
            return view
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getChildView(self, parentId, childSeq):\n # child_view = None\n # str_getChildView = \"self.vc.findViewById('\" + parentId + \"')\"\n # for index in childSeq:\n # str_getChildView += ('.children[' + str(index) + ']')\n # printLog(self.threadName + \"executing child_view=%s\" % str_getChildView)\n # exec 'child_view=' + str_getChildView\n # return child_view\n pv = self.vc.findViewById(parentId)\n if not pv:\n # printLog(self.threadName + '[__getChildView] could not find parent view %s' % parentId, logging.DEBUG)\n return None\n for index in childSeq:\n if DEBUG:\n printLog(self.threadName + '[__getChildView] searching child view: %s[%s]' % (pv.getId(), index),\n logging.DEBUG)\n cv = pv.children[int(index)]\n if cv:\n if DEBUG:\n printLog(self.threadName + '[__getChildView] found child view: %s' % cv.getId(), logging.DEBUG)\n pv = cv\n else:\n # printLog(self.threadName + '[__getChildView] could not find child of %s' % pv.getId(), logging.DEBUG)\n return None\n return pv", "def related_view(self):\n return get_related_view(self.request)", "def get_related_view(request):\n return request.environ.get('cone.app.related_view', None)", "def get(self, child_name=None):\n dn = self.org_dn\n if child_name is None:\n filter = 'objectClass=%s' % self.container_class\n msg = 'Searching for children of org %s' % self.org_name\n else:\n filter = self.container_attr + '=' + child_name\n search_scope = 1 #scope one level\n msg = 'Searching at %s with scope %s and filter %s' % \\\n (dn, search_scope, filter)\n self.log.debug(msg)\n result = self._get_object(dn, search_scope, filter)\n self.log.debug('Result: %s' % result)\n return result", "def get_reply(self, parent, child):\n try:\n reply = CommentReply.objects.get(\n pk=child,\n comment_to=parent)\n except CommentReply.DoesNotExist:\n raise exceptions.NotFound(\n f'Comment reply of ID {child} nonexistent'\n )\n\n return reply", "def fm_get_child(self, idx):\n return self._relation_lst[self.CHILD][idx]", "def get_object(self):\n lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field\n\n assert lookup_url_kwarg in self.kwargs, (\n 'Expected view %s to be called with a URL keyword argument '\n 'named \"%s\". Fix your URL conf, or set the `.lookup_field` '\n 'attribute on the view correctly.' 
%\n (self.__class__.__name__, lookup_url_kwarg)\n )\n\n filter_kwargs = {\n \"parent_id\": self.kwargs[\"boards_pk\"],\n self.lookup_field: self.kwargs[lookup_url_kwarg]\n }\n obj = get_object_or_404(self.get_queryset(), **filter_kwargs)\n\n # May raise a permission denied\n self.check_object_permissions(self.request, obj)\n\n return obj", "def spdx_relationship(self) -> Optional[pulumi.Input['RelationshipNoteArgs']]:\n return pulumi.get(self, \"spdx_relationship\")", "def find_in_db(self, *args, **kwargs):\n return self.relation.find_in_db(*args, **kwargs)", "def get_view(self, request) -> Optional[View]:\n\n # Grab ViewAction and use sorted_actions to find first match\n sorted_actions = ViewAction.sorted_actions(self.registry)\n\n # Find the first action which matches the args\n for action, view_class in sorted_actions:\n if action.all_predicates_match(request):\n # Use dependency injection to make an instance of\n # that view class\n view_instance = inject(\n dict(), # props\n self.get_injectables(request),\n view_class,\n request=request\n )\n return view_instance\n\n # No matches, return None\n return None", "def _get_foreign_object(self, instance: models.Model\n ) -> Optional[models.Model]:\n try:\n return getattr(instance, self.foreign_key)\n except models.ObjectDoesNotExist:\n # this may raise DNE while cascade deleting with Collector\n return None", "def get_parent_object_from_request(self, request):\n resolved = resolve(request.path_info)\n if 'object_id' in resolved.kwargs:\n return self.parent_model.objects.get(pk=resolved.kwargs['object_id'])\n return None", "def get_parent_object_from_request(self, request):\n resolved = resolve(request.path_info)\n if 'object_id' in resolved.kwargs:\n return self.parent_model.objects.get(pk=resolved.kwargs['object_id'])\n return None", "def get_parent_object_from_request(self, request):\n resolved = resolve(request.path_info)\n if 'object_id' in resolved.kwargs:\n return self.parent_model.objects.get(pk=resolved.kwargs['object_id'])\n return None", "def getChild(self, label = None, *args, **kwargs):\n\n\t\tif (self.child_class is None):\n\t\t\traise NotImplementedError()\n\n\t\tif (label is None):\n\t\t\tif (self.current is not None):\n\t\t\t\treturn self.current\n\t\t\tlabel = self.getUnique(self.child_uniqueName)\n\t\t\tselect = True\n\t\telse:\n\t\t\tselect = False\n\n\t\tchild = self._get(label, returnForNone = None)\n\t\tif (child is None):\n\t\t\tchild = self.new(*args, label = label, **kwargs)\n\t\tif (select):\n\t\t\tself.select(child)\n\t\treturn child", "def _get_child_page_of_type(self, cls):\n child = self.get_children().type(cls).live().first()\n return child.specific if child else None", "def _get_child_page_of_type(self, cls):\n child = self.get_children().type(cls).live().first()\n return child.specific if child else None", "def view():\n # retrieve child and dorm parents records from database\n children = Child.query.filter_by().all()\n parents = Parent.query.filter_by().all()\n return render_template('view.html', children=children, parents=parents)", "def __getView(self, raw_view_id):\n if iDevice.dump_view:\n self.__dumpview()\n id_RE = re.compile(\"^(id/\\D+)\\((\\S+)\\)$\")\n if DEBUG:\n printLog(self.threadName + \"[__getView] raw view id:%s\" % raw_view_id)\n if id_RE.match(raw_view_id):\n # search the child by sequence path\n viewId, seq_string = id_RE.search(raw_view_id).groups()\n if DEBUG:\n printLog(self.threadName + \"[__getView] view id:%s, seq:%s\" % (viewId, seq_string))\n seqs = seq_string.split(',')\n tv = 
self.__getChildView(viewId, seqs)\n else:\n # search with the given id directly\n if DEBUG:\n printLog(self.threadName + \"finding view by id %s ...\" % raw_view_id, logging.DEBUG)\n tv = self.vc.findViewById(raw_view_id)\n # if tv:\n # printLog('Found view %s.' % raw_view_id, logging.DEBUG)\n # self.resultFlag = True\n # else:\n # printLog('Target view %s not found.' % raw_view_id, logging.ERROR)\n # self.resultFlag = False\n\n return tv", "def get(self, request, *args, **kwargs):\n with connection.cursor() as cursor:\n params = (kwargs['object_id'], kwargs['content_type_id'],\n ContentType.objects.get_for_model(models.Comment).id)\n cursor.execute(SQL_GET_CHILDREN, params)\n return Response(dictfetchall(cursor))", "def get_child_by_readable_id(self, readable_id):\n raise NotImplementedError", "def use_any_effective_relationship_view(self):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.use_any_effective_relationship_view\n self._use_any_effective_view()", "def related_view_filter():\n pass", "def use_effective_relationship_view(self):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.use_effective_relationship_view\n self._use_effective_view()", "def get_child(self, child_index):\n try:\n return self.children[child_indexndex] #Return the child at the provided index\n except: #If the index is invalid,\n return None #Returns None", "def locateChild(req, segments):", "def allow_child (self, name, user, obj, ** kw) :\n try :\n p = self.child_permission_map [name]\n except KeyError :\n return True\n else :\n return p.instance (user, self, obj = obj, ** kw)", "def _safe_resolve(self, sequence, parent=True):\n path = '/' + self.name + '/' + '/'.join([sequence[::-1][:i][::-1]\n for i in range(1, len(sequence) + int(parent == False))])\n try:\n return self._resolver.get(self, path)\n except ChildResolverError as e:\n return None", "def get(self, *args, **kwargs):\n self.before_get(args, kwargs)\n\n relationship_field, model_relationship_field, related_type_, related_id_field = self._get_relationship_data()\n related_view = self.schema._declared_fields[relationship_field].related_view\n related_view_kwargs = self.schema._declared_fields[relationship_field].related_view_kwargs\n\n obj, data = self._data_layer.get_relationship(model_relationship_field,\n related_type_,\n related_id_field,\n kwargs)\n\n for key, value in copy(related_view_kwargs).items():\n if isinstance(value, str) and value.startswith('<') and value.endswith('>'):\n tmp_obj = obj\n for field in value[1:-1].split('.'):\n tmp_obj = getattr(tmp_obj, field)\n related_view_kwargs[key] = tmp_obj\n\n result = {'links': {'self': request.path,\n 'related': url_for(related_view, **related_view_kwargs)},\n 'data': data}\n\n qs = QSManager(request.args, self.schema)\n if qs.include:\n schema = compute_schema(self.schema, dict(), qs, qs.include)\n\n serialized_obj = schema.dump(obj)\n result['included'] = serialized_obj.data.get('included', dict())\n\n self.after_get(result)\n return result", "def _resolve_lookup((model, lookup, arg_name), view_kwargs):\n value = view_kwargs.get(arg_name)\n if value is None:\n raise ValueError(\"Expected kwarg '%s' not found.\" % arg_name)\n if isinstance(model, basestring):\n model_class = get_model(*model.split('.'))\n else:\n model_class = model\n if model_class is None:\n raise ValueError(\"The given argument '%s' is not a valid model.\" %\n model)\n if inspect.isclass(model_class) and not issubclass(model_class, Model):\n raise 
ValueError(\"The argument '%s' needs to be a model.\" % model)\n return get_object_or_404(model_class, **{lookup: value})" ]
[ "0.59846926", "0.5548496", "0.5530844", "0.5438237", "0.52437264", "0.5235289", "0.52155375", "0.51775765", "0.5124838", "0.5076548", "0.49964866", "0.49897185", "0.49897185", "0.49897185", "0.49834818", "0.49717134", "0.49717134", "0.49509534", "0.49450922", "0.49279633", "0.49062932", "0.48958907", "0.48841208", "0.48823133", "0.48673636", "0.4858717", "0.48373654", "0.4800029", "0.4799494", "0.47901264" ]
0.655181
0
Ensure that an element can't be added if parent or children are in view.
def check_parent_and_children_not_in_view(self, element: Element) -> None:
    for view in self.element_views:
        if view.element in element.child_elements:
            raise ValueError(f"A child of {element.name} is already in this view.")
        if view.element is getattr(element, "parent", None):
            raise ValueError(
                f"The parent of {element.name} is already in this view."
            )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_lacking_parent(self):\n pass", "def child_invalid(self):\n raise NotImplementedError(\n \"{} does not have implemented `child_invalid`\".format(self)\n )", "def has_parent(self):\n return False", "def incorrectly_nested(self):\n return self.parent is not None and self.root < self.parent.root", "def is_parent(self):\n return not self.children", "def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )", "def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )", "def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )", "def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )", "def can_create_at(cls, parent):\n return (\n super().can_create_at(parent)\n and not parent.get_children().type(cls).exists()\n )", "def orphaned(self):\n return (self.parent is None)", "def has_child(self):\n return False", "def circular_checker(parent, child):\n if parent == child:\n raise ValidationError('Self links are not allowed.')\n\n if child.pk in parent.get_ancestor_pks():\n raise ValidationError('The object is an ancestor.')", "def test_add_self_as_parent(self):\n groupa = Group('groupa')\n with pytest.raises(Exception):\n groupa.add_parent(groupa)", "def has_parent(self):\n return self.parent != None", "def test_insert_no_parent(tree):\n with pytest.raises(ValueError):\n assert tree.insert(1)", "def requires_safe_render(self) -> bool:\n return True\n # return any(is_reserved(child.name) for child in self.children)", "def _validate_node(self, node):\n if not isinstance(node, self._Node):\n raise TypeError('Invalid object type!')\n if node._container != self:\n raise ValueError('Node does not belong to this list!')\n if node._index < 0 or node._index >= self._size:\n raise ValueError('Invalid node!')", "def handle_uncaught_event(self, event):\r\n if self.get_visible():\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_uncaught_event(event):\r\n return True\r\n return False", "def _markValidElements(self, element):\n self.log(\"element:%s\" % element.get_name())\n if element == self.typefind:\n return\n self._validelements.append(element)\n # find upstream element\n pad = list(element.sink_pads())[0]\n parent = pad.get_peer().get_parent()\n self._markValidElements(parent)", "def get_apparent_element(self, doc):\n return NotImplemented", "def is_element_in_view(self, element: Element) -> bool:\n return self.find_element_view(element=element) is not None", "def test_replace_parent_to_self(self):\n groupa, groupb = Group('groupa'), Group('groupb')\n groupa.add_parent(groupb)\n with pytest.raises(Exception):\n groupa.replace_parent(groupb, groupa)", "def islchild(self):\n\t\tif (self.parent() and self.parent().lchild() is self): #TODO is or == here\n\t\t\treturn True\n\t\treturn False", "def checkObject(container, name, object):\n\n # check __setitem__ precondition\n containerProvided = providedBy(container)\n __setitem__ = containerProvided.get('__setitem__')\n if __setitem__ is not None:\n precondition = __setitem__.queryTaggedValue('precondition')\n if precondition is not None:\n precondition(container, name, object)\n\n # check that object is not being pasted into itself or its children.\n target = container\n while target is not None:\n if target is object:\n raise TypeError(\"Cannot add 
an object to itself or its children.\")\n if zope.location.interfaces.ILocation.providedBy(target):\n target = target.__parent__\n else:\n target = None\n\n # check the constraint on __parent__\n __parent__ = providedBy(object).get('__parent__')\n try:\n validate = __parent__.validate\n except AttributeError:\n pass\n else:\n validate(container)\n\n if not containerProvided.extends(IContainer):\n # If it doesn't implement IContainer, it can't contain stuff.\n raise TypeError(\n _('Container is not a valid Zope container.')\n )", "def _layout_invalidated(self, change):\n # The superclass handler is sufficient.\n super(VGroup, self)._layout_invalidated(change)", "def _remove_element(self, element: Element) -> None:\n if element not in self.model:\n raise RuntimeError(\n f\"The element {element} does not exist in the model associated with \"\n f\"this view.\"\n )\n self.element_views.add(ElementView(id=element.id))\n for element_view in list(self.element_views): # Copy as modifying as we go\n if element_view.id == element.id:\n self.element_views.remove(element_view)\n\n for relationship_view in list(self._relationship_views):\n if (\n relationship_view.relationship.source.id == element.id\n or relationship_view.relationship.destination.id == element.id\n ):\n self._relationship_views.remove(relationship_view)", "def hasFailingAncestor(self):\n parent = self.parent\n if parent is None:\n return\n # TODO: Temporarily disabled.\n return\n return parent.hasFailed or parent.hasFailingAncestor()", "def check_children_eq_parent(self):\n\t\tif len(self.tree.children) == 0:\n\t\t\treturn\n\n\t\tchild_count = 0.0\n\t\tfor child in self.tree.children:\n\t\t\tchild_count += child.utility.count\n\t\tassert self.utility.count == child_count", "def test_container_no_asset_for_container(self):\n assets = Asset.objects.select_subclasses()\n right = Container.objects.get(name='right')\n SectionAsset.objects.create(section=self.section, asset=assets[0],\n container=right)\n # Refresh the section object to get new relations\n self.section = Section.objects.get(pk=self.section.pk)\n context = {\n 'assets': self.section.sectionasset_set.order_by('weight')\n }\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)" ]
[ "0.60467064", "0.5970557", "0.5904759", "0.58408904", "0.57895845", "0.5762888", "0.5762888", "0.5762888", "0.5762888", "0.5762888", "0.55479467", "0.5520642", "0.5500766", "0.545122", "0.5440226", "0.5396799", "0.5394671", "0.53918046", "0.53586113", "0.53525645", "0.5312074", "0.530367", "0.5290503", "0.5288612", "0.5279782", "0.5259862", "0.5253384", "0.5248703", "0.52478385", "0.52453774" ]
0.79654515
0
Implements bilinear functions using replicated secret shares. Shares are input as ArithmeticSharedTensors and are replicated within this function to perform computations. The protocol used here is that of section 3.2 of ABY3.
def __replicated_secret_sharing_protocol(op, x, y, *args, **kwargs):
    assert op in {
        "mul",
        "matmul",
        "conv1d",
        "conv2d",
        "conv_transpose1d",
        "conv_transpose2d",
    }
    x_shares, y_shares = replicate_shares([x.share, y.share])
    x1, x2 = x_shares
    y1, y2 = y_shares

    z = x.shallow_copy()
    z.share = getattr(torch, op)(x1, y1, *args, **kwargs)
    z.share += getattr(torch, op)(x1, y2, *args, **kwargs)
    z.share += getattr(torch, op)(x2, y1, *args, **kwargs)

    return z
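As a quick check of the arithmetic behind this rule, the sketch below (a local toy under stated assumptions, not the CrypTen API: plain integer tensors, no fixed-point encoding, no communication) simulates three parties holding replicated additive shares and verifies that the per-party cross terms computed as above sum back to x * y.

# Minimal local sketch of 3-party replicated secret sharing multiplication.
import torch

x, y = torch.tensor(7), torch.tensor(5)

# Additive 3-way splits: x = x1 + x2 + x3 and y = y1 + y2 + y3.
x1, x2 = torch.tensor(23), torch.tensor(-41)
x3 = x - x1 - x2
y1, y2 = torch.tensor(-8), torch.tensor(60)
y3 = y - y1 - y2

shares_x = [x1, x2, x3]
shares_y = [y1, y2, y3]

# Party i holds (share_i, share_{i+1}) of each value and computes
# z_i = x_i*y_i + x_i*y_{i+1} + x_{i+1}*y_i, mirroring the function above.
z = []
for i in range(3):
    xi, xj = shares_x[i], shares_x[(i + 1) % 3]
    yi, yj = shares_y[i], shares_y[(i + 1) % 3]
    z.append(xi * yi + xi * yj + xj * yi)

# Summing the three partial results covers all nine cross products,
# so it reconstructs the product of the underlying secrets.
assert sum(z) == x * y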
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_shared(self, x=None, y=None):\n with tf.name_scope(\"comput_shared\") as name:\n assert (x is None) != (y is None)\n is_x = x is not None\n\n with tf.name_scope(\"piece0\"):\n range_min = tf.convert_to_tensor(self.range_min, name='range_min')\n kx = _knot_positions(self.bin_widths, range_min)\n ky = _knot_positions(self.bin_heights, range_min)\n kd = _padded(_ensure_at_least_1d(self.knot_slopes), lhs=1, rhs=1)\n kx_or_ky = kx if is_x else ky\n kx_or_ky_min = kx_or_ky[..., 0]\n kx_or_ky_max = kx_or_ky[..., -1]\n x_or_y = x if is_x else y\n out_of_bounds = tf.zeros_like(x_or_y) #; (x_or_y <= kx_or_ky_min) | (x_or_y >= kx_or_ky_max)\n x_or_y = x_or_y # tf.where(out_of_bounds, kx_or_ky_min, x_or_y)\n\n with tf.name_scope(\"piece0b\"):\n shape = functools.reduce(\n tf.broadcast_dynamic_shape,\n (\n tf.shape(x_or_y[..., tf.newaxis]), # Add a n_knots dim.\n tf.shape(kx),\n tf.shape(ky),\n tf.shape(kd)))\n\n with tf.name_scope(\"piece1\"):\n\n bc_x_or_y = tf.broadcast_to(x_or_y, shape[:-1])\n bc_kx = tf.broadcast_to(kx, shape)\n bc_ky = tf.broadcast_to(ky, shape)\n bc_kd = tf.broadcast_to(kd, shape)\n bc_kx_or_ky = bc_kx if is_x else bc_ky\n\n with tf.name_scope(\"piece1b\"):\n indices = tf.clip_by_value(\n tf.searchsorted(\n bc_kx_or_ky[..., :-1],\n bc_x_or_y[..., tf.newaxis],\n side='right',\n out_type=tf.int32) - 1, 0, 1000)\n\n def gather_squeeze(params, indices):\n rank = tensorshape_util.rank(indices.shape)\n if rank is None:\n raise ValueError('`indices` must have statically known rank.')\n return tf.gather(params, indices, axis=-1, batch_dims=rank - 1)[..., 0]\n\n with tf.name_scope(\"piece2\"):\n x_k = gather_squeeze(bc_kx, indices)\n x_kp1 = gather_squeeze(bc_kx, indices + 1)\n y_k = gather_squeeze(bc_ky, indices)\n y_kp1 = gather_squeeze(bc_ky, indices + 1)\n d_k = gather_squeeze(bc_kd, indices)\n d_kp1 = gather_squeeze(bc_kd, indices + 1)\n h_k = y_kp1 - y_k\n w_k = x_kp1 - x_k\n s_k = h_k / w_k\n\n return _SplineShared(\n out_of_bounds=out_of_bounds,\n x_k=x_k,\n y_k=y_k,\n d_k=d_k,\n d_kp1=d_kp1,\n h_k=h_k,\n w_k=w_k,\n s_k=s_k)", "def _eqz_2PC(self):\n # Create BinarySharedTensors from shares\n x0 = MPCTensor(self.share, src=0, ptype=Ptype.binary)\n x1 = MPCTensor(-self.share, src=1, ptype=Ptype.binary)\n\n # Perform equality testing using binary shares\n x0._tensor = x0._tensor.eq(x1._tensor)\n x0.encoder = self.encoder\n\n # Convert to Arithmetic sharing\n result = x0.to(Ptype.arithmetic, bits=1)\n result.encoder._scale = 1\n\n return result", "def share_combining(self, shares):\n mod_shares = [share[1] * (calc_lambda(shares,\n share[0], self.precomputed_fac)) for i, share in enumerate(shares)]\n return sum(mod_shares)", "def shared(data):\r\n shared_x = theano.shared(\r\n np.asarray(data[0], dtype=theano.config.floatX), borrow=True)\r\n shared_y = theano.shared(\r\n np.asarray(data[1], dtype=theano.config.floatX), borrow=True)\r\n return shared_x, T.cast(shared_y, \"int32\")", "def shared(data):\n shared_x = theano.shared(\n np.asarray(data[0], dtype=theano.config.floatX), borrow=True)\n shared_y = theano.shared(\n np.asarray(data[1], dtype=theano.config.floatX), borrow=True)\n return shared_x, T.cast(shared_y, \"int32\")", "def client_side_sfsa_round1(communication, client_socket, FEDSUBAVG_SELF_STORAGE, FEDSUBAVG_OTHERS_STORAGE, \\\r\n fedsubavg_security_para_dict, FEDSUBAVG_DHKE):\r\n start_time_1 = time.time()\r\n # Generate seed for PRNG\r\n seed_len = fedsubavg_security_para_dict['seed_len']\r\n fedsubavg_b_entropy = os.urandom(seed_len/8) #bytes\r\n 
fedsubavg_b = bytes2int(fedsubavg_b_entropy)\r\n\r\n t = FEDSUBAVG_SELF_STORAGE['t']\r\n n = FEDSUBAVG_SELF_STORAGE['n']\r\n # Generate t-out-of-n shares for PRNG's seed b\r\n fedsubavg_shares_b = SecretSharer.split_secret(fedsubavg_b, t, n)\r\n # Generate t-out-of-n shares for client's ssk\r\n fedsubavg_shares_my_ssk = SecretSharer.split_secret(FEDSUBAVG_SELF_STORAGE['my_ssk'], t, n)\r\n\r\n # Store random seed, and secret shares into self dictionary\r\n FEDSUBAVG_SELF_STORAGE['b_entropy'] = fedsubavg_b_entropy\r\n '''\r\n FEDSUBAVG_SELF_STORAGE['b'] = fedsubavg_b\r\n FEDSUBAVG_SELF_STORAGE['shares_b'] = fedsubavg_shares_b\r\n FEDSUBAVG_SELF_STORAGE['shares_my_ssk'] = fedsubavg_shares_my_ssk\r\n '''\r\n\r\n # Store my share of b in isolation\r\n # No need to store my share of my ssk, since I am alive to myself!\r\n fedsubavg_my_share_b = fedsubavg_shares_b[0]\r\n fedsubavg_shares_b = list( set(fedsubavg_shares_b) - set([fedsubavg_my_share_b]))\r\n FEDSUBAVG_SELF_STORAGE['my_share_b'] = fedsubavg_my_share_b\r\n\r\n fedsubavg_ss_ciphers_dict = {}\r\n for idx, client_index in enumerate(FEDSUBAVG_OTHERS_STORAGE.keys()): # Already except myself\r\n # Derive symmetric encryption key \"agreed\" with other client (with client_index) (via Diffie-Hellman Agreement)\r\n sym_enc_key = FEDSUBAVG_DHKE.agree(FEDSUBAVG_SELF_STORAGE['my_csk'], FEDSUBAVG_OTHERS_STORAGE[client_index]['cpk'])\r\n # Send ciphertext to other client (with client_index), where PS works as a mediation\r\n msg = str(FEDSUBAVG_SELF_STORAGE['my_index']) + ' || ' + str(client_index) + ' || ' + str(fedsubavg_shares_b[idx]) \\\r\n + ' || ' + str(fedsubavg_shares_my_ssk[idx])\r\n # Encrypt with AES_CBC\r\n enc_msg = AESCipher(str(sym_enc_key)).encrypt(msg)\r\n fedsubavg_ss_ciphers_dict[client_index] = enc_msg\r\n\r\n FEDSUBAVG_OTHERS_STORAGE[client_index]['sym_enc_key'] = sym_enc_key\r\n '''\r\n FEDSUBAVG_OTHERS_STORAGE[client_index]['msg'] = msg\r\n FEDSUBAVG_OTHERS_STORAGE[client_index]['enc_msg'] = enc_msg\r\n '''\r\n end_time_1 = time.time()\r\n\r\n # send encrypted shares to the server\r\n fedsubavg_ss_ciphers_send_message = {'client_ID': FEDSUBAVG_SELF_STORAGE['my_index'],\r\n 'ss_ciphers': fedsubavg_ss_ciphers_dict}\r\n communication.send_np_array(fedsubavg_ss_ciphers_send_message, client_socket)\r\n print('Client %d sent encrypted secret shares to server in secure federated submodel averaging' % FEDSUBAVG_SELF_STORAGE['my_index'])\r\n sys.stdout.flush()\r\n\r\n # Receive other clients' encrypted shares and indices for mutual mask to me from the server\r\n round1_returned_message = communication.get_np_array(client_socket)\r\n print(\"Received other clients' encrypted secret shares and indices for mutual mask from server\")\r\n sys.stdout.flush()\r\n\r\n start_time_2 = time.time()\r\n\r\n # Decrypt the secret shares and store them\r\n ss_ciphers_dict_received = round1_returned_message['ss_ciphers_dict']\r\n for client_index, enc_msg in ss_ciphers_dict_received.items():\r\n # Decrypt the encrypted message and parse it\r\n sym_enc_key = FEDSUBAVG_OTHERS_STORAGE[client_index]['sym_enc_key']\r\n msg = AESCipher(str(sym_enc_key)).decrypt(enc_msg)\r\n msg_parts = msg.split(' || ')\r\n # Sanity check\r\n from_client_index = int(msg_parts[0])\r\n my_index = int(msg_parts[1])\r\n assert from_client_index == client_index and my_index == FEDSUBAVG_SELF_STORAGE['my_index']\r\n # Store secret shares of other clients\r\n FEDSUBAVG_OTHERS_STORAGE[client_index]['share_b'] = msg_parts[2]\r\n FEDSUBAVG_OTHERS_STORAGE[client_index]['share_ssk'] = 
msg_parts[3]\r\n # Indices of other clients (except myself) for mutual mask U1\\Client Self\r\n FEDSUBAVG_SELF_STORAGE['mutual_mask_general_client_indices'] = round1_returned_message['mutual_mask_general_client_indices']\r\n\r\n end_time_2 = time.time()\r\n write_csv(FEDSUBAVG_SELF_STORAGE['client_computation_time_path'], [FEDSUBAVG_SELF_STORAGE['communication_round_number'], \\\r\n \"sfsa_U1\", end_time_1 - start_time_1 + end_time_2 - start_time_2])", "def from_shares(share, precision=None, src=0, device=None):\n result = BinarySharedTensor(src=SENTINEL)\n share = share.to(device) if device is not None else share\n result.share = CUDALongTensor(share) if share.is_cuda else share\n result.encoder = FixedPointEncoder(precision_bits=precision)\n return result", "def block_reduction_a(self, inputs, scope=None, reuse=None):\n # By default use stride=1 and SAME padding\n with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],\n stride=1, padding='SAME'):\n with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')\n branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])", "def gen_ramp_biases(ref_dict, nchan=None, data_shape=(2,2048,2048), ref_border=[4,4,4,4]):\n \n if nchan is None:\n nchan = len(ref_dict['amp_offset_mean'])\n\n cube = np.zeros(data_shape)\n nz, ny, nx = data_shape\n chsize = int(nx/nchan)\n \n ######################\n # Add overall bias\n # TODO: Add temperature dependence\n bias_off = ref_dict['master_bias_mean'] + np.random.normal(scale=ref_dict['master_bias_std'])\n cube += bias_off\n\n # Add amplifier offsets\n # These correlate to bias offset\n cf = ref_dict['master_amp_cf']\n amp_off = jl_poly(bias_off, cf) + np.random.normal(scale=ref_dict['amp_offset_std'])\n\n for ch in range(nchan):\n cube[:,:,ch*chsize:(ch+1)*chsize] += amp_off[ch]\n \n # Include frame-to-frame bias variation\n ######################\n bias_off_f2f = np.random.normal(scale=ref_dict['master_bias_f2f'], size=nz)\n amp_off_f2f = np.random.normal(scale=ref_dict['amp_offset_f2f'][0:nchan], size=(nz,nchan))\n\n for i, im in enumerate(cube):\n im += bias_off_f2f[i]\n for ch in range(nchan):\n im[:,ch*chsize:(ch+1)*chsize] += amp_off_f2f[i,ch]\n \n # Add some reference pixel instability relative to active pixels\n ######################\n\n # Mask of active pixels\n mask_act = np.zeros([ny,nx]).astype('bool')\n rb, rt, rl, rr = ref_border\n mask_act[rb:-rt,rl:-rr] = True\n\n # Mask of all reference pixels\n mask_ref = ~mask_act\n\n # ref_inst = np.random.normal(scale=ref_dict['amp_ref_inst_f2f'], size=(nz,nchan))\n for ch in range(nchan):\n mask_ch = np.zeros([ny,nx]).astype('bool')\n mask_ch[:,ch*chsize:(ch+1)*chsize] = True\n\n std = ref_dict['amp_ref_inst_f2f'][ch]\n ref_noise = std * pink_noise(nz)\n cube[:, mask_ref & mask_ch] += ref_noise.reshape([-1,1])\n\n \n # cube[:,mask_ref & mask_ch] += ref_inst[:,ch].reshape([-1,1])\n\n\n # Set even/odd offsets\n ######################\n mask_even = np.zeros([ny,nx]).astype('bool')\n 
mask_even[:,0::2] = True\n\n mask_odd = np.zeros([ny,nx]).astype('bool')\n mask_odd[:,1::2] = True\n\n for ch in range(nchan):\n mask_ch = np.zeros([ny,nx]).astype('bool')\n mask_ch[:,ch*chsize:(ch+1)*chsize] = True\n\n cube[:, mask_even & mask_ch] += ref_dict['amp_even_col_offset'][ch]\n cube[:, mask_odd & mask_ch] += ref_dict['amp_odd_col_offset'][ch]\n \n return cube", "def test_pooling(self):\n for width in range(2, 5):\n for width2 in range(1, width):\n matrix_size = (4, 5, width)\n matrix = get_random_test_tensor(size=matrix_size)\n pool_size = width2\n for stride in range(1, width2):\n for padding in range(2):\n reference = torch.nn.functional.avg_pool2d(\n matrix.unsqueeze(0), pool_size,\n stride=stride, padding=padding\n )\n\n encrypted_matrix = SharedTensor(matrix)\n encrypted_pool = encrypted_matrix.avg_pool2d(\n pool_size, stride=stride, padding=padding)\n self._check(\n encrypted_pool, reference[0], 'avg_pool2d failed')", "def inline_reduce_fixed_shared(N, buf, x, stride_x, pos, count,\r\n manner_fn, manner_init,\r\n b='', stride_b=''):\r\n if b:\r\n init = manner_init(\"%(x)s[%(pos)s * %(stride_x)s] +\"\r\n \" %(b)s[%(pos)s * %(stride_b)s]\" % locals())\r\n loop_line = manner_fn(\"red\",\r\n manner_init(\"%(x)s[i * %(stride_x)s] + \"\r\n \"%(b)s[i * %(stride_b)s]\" %\r\n locals()))\r\n else:\r\n init = manner_init(\"%(x)s[%(pos)s * %(stride_x)s]\" % locals())\r\n loop_line = manner_fn(\"red\", manner_init(\"%(x)s[i * %(stride_x)s]\" %\r\n locals()))\r\n loop_line2 = manner_fn(\"%s[%s]\" % (buf, pos),\r\n \"%s[i]\" % buf)\r\n r_16 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+16]\" % (buf, pos))\r\n r_8 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+8]\" % (buf, pos))\r\n r_4 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+4]\" % (buf, pos))\r\n r_2 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+2]\" % (buf, pos))\r\n r_1 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+1]\" % (buf, pos))\r\n\r\n return \"\"\"\r\n {\r\n // This function trashes buf[1..n_threads],\r\n // leaving the reduction result in buf[0].\r\n float red = %(init)s;\r\n #pragma unroll 16\r\n for (int i = %(pos)s + %(count)s; i<%(N)s; i += %(count)s){\r\n red = %(loop_line)s;\r\n }\r\n buf[%(pos)s] = red;\r\n __syncthreads();\r\n if (%(pos)s < warpSize)\r\n {\r\n for (int i = %(pos)s + warpSize; i < %(count)s; i += warpSize)\r\n {\r\n %(buf)s[%(pos)s] = %(loop_line2)s;\r\n }\r\n if (%(pos)s < 16)\r\n {\r\n //reduce so that %(pos)s 0 has the reduction of everything\r\n if(%(pos)s + 16 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_16)s;\r\n if(%(pos)s + 8 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_8)s;\r\n if(%(pos)s + 4 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_4)s;\r\n if(%(pos)s + 2 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_2)s;\r\n if(%(pos)s + 1 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_1)s;\r\n }\r\n }\r\n }\r\n \"\"\" % locals()", "def universal_hashing(x, y, t):\r\n\r\n Q = 32 # Bit length of the input integers\r\n Q_star = Q + t - 1 # Universe within which a, b and x reside\r\n n_apo = len(x) // Q\r\n\r\n # In case n_apo is not an integer, the strings are padded with zeros so that n_apo becomes an integer\r\n if len(x) % Q != 0:\r\n s = Q - (len(x) - n_apo * Q) # Find the number of the necessary zeros to be padded\r\n n_apo = int(np.floor(n_apo) + 1) # Convert n_apo into an integer\r\n x = np.append(x, np.zeros(shape=s, dtype=np.int8))\r\n y = np.append(y, np.zeros(shape=s, dtype=np.int8))\r\n x_Q = np.array_split(x, n_apo)\r\n y_Q = np.array_split(y, n_apo)\r\n else:\r\n n_apo = int(n_apo)\r\n x_Q = np.array_split(x, n_apo)\r\n y_Q = 
np.array_split(y, n_apo)\r\n\r\n # Convert every element of the arrays to string\r\n for i in range(n_apo):\r\n x_Q[i] = str(x_Q[i]).replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"\")\r\n y_Q[i] = str(y_Q[i]).replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"\")\r\n\r\n # Generate integers a, b, where a is non-zero odd and belongs to [1, Q_star), and b belongs to [0, Q_star)\r\n # These values represent the chosen universal hash function from the family and are communicated via the channel\r\n a = []\r\n ax_x = []\r\n ax_y = []\r\n for i in range(n_apo): # Generate a different a for every d\r\n a_i = random.getrandbits(Q_star)\r\n if a_i % 2 == 0: # a must be odd\r\n a_i += 1\r\n a.append(a_i)\r\n b = random.randint(0, 2 ** Q_star)\r\n\r\n # Perform the integer multiplications ax for every d (a: w_bar, x: w)\r\n for i in range(n_apo):\r\n ax_x_i = a[i] * int(x_Q[i], 2)\r\n ax_y_i = a[i] * int(y_Q[i], 2)\r\n ax_x.append(ax_x_i)\r\n ax_y.append(ax_y_i)\r\n\r\n h_x = sum(ax_x) + b # Get the sum of all multiplications and add integer b afterwards\r\n h_y = sum(ax_y) + b # Get the sum of all multiplications and add integer b afterwards\r\n # Convert to binary and obtain only the last w_bar bits, as the multiplication gives a result larger than w_bar bits\r\n h_x = str(np.binary_repr(h_x))[-Q_star:]\r\n h_y = str(np.binary_repr(h_y))[-Q_star:]\r\n\r\n # Modular arithmetic is replaced with bit shift\r\n # If there are leading zeros in a binary sequence, Python removes them when performing bit shifts\r\n # Since the zeros need to be kept, in order to have a fixed length output, the format function is implemented\r\n h_x = format(int(h_x, 2) >> (Q_star - t), '0' + str(t) + 'b')\r\n h_y = format(int(h_y, 2) >> (Q_star - t), '0' + str(t) + 'b')\r\n\r\n if h_x == h_y:\r\n return True\r\n else:\r\n return False", "def inline_reduce_fixed_shared(N, buf, x, stride_x, pos, count,\r\n manner_fn, manner_init,\r\n b='', stride_b='', dtype='float32'):\r\n if b:\r\n init = manner_init(\"%(x)s[%(pos)s * %(stride_x)s] +\"\r\n \" %(b)s[%(pos)s * %(stride_b)s]\" % locals())\r\n loop_line = manner_fn(\"red\",\r\n manner_init(\"%(x)s[i * %(stride_x)s] + \"\r\n \"%(b)s[i * %(stride_b)s]\" %\r\n locals()))\r\n else:\r\n init = manner_init(\"%(x)s[%(pos)s * %(stride_x)s]\" % locals())\r\n loop_line = manner_fn(\"red\", manner_init(\"%(x)s[i * %(stride_x)s]\" %\r\n locals()))\r\n loop_line2 = manner_fn(\"%s[%s]\" % (buf, pos),\r\n \"%s[i]\" % buf)\r\n r_16 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+16]\" % (buf, pos))\r\n r_8 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+8]\" % (buf, pos))\r\n r_4 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+4]\" % (buf, pos))\r\n r_2 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+2]\" % (buf, pos))\r\n r_1 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+1]\" % (buf, pos))\r\n\r\n return \"\"\"\r\n {\r\n // This function trashes buf[1..n_threads],\r\n // leaving the reduction result in buf[0].\r\n npy_%(dtype)s red = %(init)s;\r\n #pragma unroll 16\r\n for (int i = %(pos)s + %(count)s; i<%(N)s; i += %(count)s){\r\n red = %(loop_line)s;\r\n }\r\n buf[%(pos)s] = red;\r\n __syncthreads();\r\n if (%(pos)s < warpSize)\r\n {\r\n for (int i = %(pos)s + warpSize; i < %(count)s; i += warpSize)\r\n {\r\n %(buf)s[%(pos)s] = %(loop_line2)s;\r\n }\r\n if (%(pos)s < 16)\r\n {\r\n //reduce so that %(pos)s 0 has the reduction of everything\r\n if(%(pos)s + 16 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_16)s;\r\n if(%(pos)s + 8 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_8)s;\r\n if(%(pos)s + 4 < 
%(N)s)\r\n %(buf)s[%(pos)s] = %(r_4)s;\r\n if(%(pos)s + 2 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_2)s;\r\n if(%(pos)s + 1 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_1)s;\r\n }\r\n }\r\n }\r\n \"\"\" % locals()", "def combine_shares(share_list: List[bytes]) -> bytes:\n unpickled_share_list: List[List[Tuple[int, bytes]]] = [\n cast(List[Tuple[int, bytes]], pickle.loads(share)) for share in share_list\n ]\n\n chunk_num = len(unpickled_share_list[0])\n secret_padded = bytearray(0)\n chunk_shares_list: List[List[Tuple[int, bytes]]] = []\n for i in range(chunk_num):\n chunk_shares: List[Tuple[int, bytes]] = []\n for share in unpickled_share_list:\n chunk_shares.append(share[i])\n chunk_shares_list.append(chunk_shares)\n\n with ThreadPoolExecutor(max_workers=10) as executor:\n for chunk in executor.map(_shamir_combine, chunk_shares_list):\n secret_padded += chunk\n\n secret = unpad(secret_padded, 16)\n return bytes(secret)", "def sample_pairing(image1, image2, weight, name=None):\n with tf.name_scope(name or \"sample_pairing\"):\n paired_image = blend(image1, image2, weight)\n return paired_image", "def shared_dataset(data_x, data_y, borrow=True):\n shared_x = theano.shared(np.asarray(data_x,\n dtype=theano.config.floatX),\n borrow=borrow)\n shared_y = theano.shared(np.asarray(data_y, dtype=np.int32),\n borrow=borrow)\n return shared_x, shared_y", "def _mult_raster_op(array_a, array_b, nodata_a, nodata_b, target_nodata):\r\n result = numpy.empty(array_a.shape, dtype=numpy.float32)\r\n result[:] = target_nodata\r\n valid_mask = (array_a != nodata_a) & (array_b != nodata_b)\r\n result[valid_mask] = array_a[valid_mask] * array_b[valid_mask]\r\n return result", "def _clifford_swap(cls, slot_i, slot_j) -> Tensor:\n\n return Tensor(\n {\n Tensor._merge_keys((slot_j,), (slot_i,)): -1,\n Tensor._merge_keys(): 2 * cls.symmetric_bilinear_form(slot_i, slot_j),\n }\n )", "def check_correctness_bc01(f):\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n zv = rng.randn(batch_size, rows, cols,\n channels).astype(config.floatX) * 1. 
- 1.5\n top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,\n channels).astype(config.floatX)\n\n p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.name = 'z_th'\n zr = z_th.dimshuffle(0, 3, 1, 2)\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.name = 'top_down_th'\n top_down_r = top_down_th.dimshuffle(0, 3, 1, 2)\n\n p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)\n\n func = function([z_th, top_down_th], [p_th.dimshuffle(0, 2, 3, 1),\n h_th.dimshuffle(0, 2, 3, 1)])\n\n pv, hv = func(zv, top_down_v)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False", "def client_side_sfsa_round3(communication, client_socket, FEDSUBAVG_SELF_STORAGE, FEDSUBAVG_OTHERS_STORAGE):\r\n start_time = time.time()\r\n # U2: Except myself\r\n fedsubavg_u2_live = list(set(FEDSUBAVG_SELF_STORAGE['U2']) - set([FEDSUBAVG_SELF_STORAGE['my_index']]))\r\n # U1/U2\r\n fedsubavg_u2_drop = FEDSUBAVG_SELF_STORAGE['U1\\U2']\r\n\r\n # Shares of self mask's seed for live clients\r\n fedsubavg_live_b_shares = dict()\r\n for client_index_live in fedsubavg_u2_live:\r\n fedsubavg_live_b_shares[client_index_live] = FEDSUBAVG_OTHERS_STORAGE[client_index_live]['share_b']\r\n fedsubavg_live_b_shares[FEDSUBAVG_SELF_STORAGE['my_index']] = FEDSUBAVG_SELF_STORAGE['my_share_b']\r\n\r\n # Shares of mutual mask's secret key for dropped clients\r\n fedsubavg_drop_s_shares = dict()\r\n for client_index_drop in fedsubavg_u2_drop:\r\n fedsubavg_drop_s_shares[client_index_drop] = FEDSUBAVG_OTHERS_STORAGE[client_index_drop]['share_ssk']\r\n\r\n write_csv(FEDSUBAVG_SELF_STORAGE['client_computation_time_path'], [FEDSUBAVG_SELF_STORAGE['communication_round_number'], \\\r\n \"sfsa_U3\", time.time() - start_time])\r\n\r\n # Send shares to the server\r\n fedsubavg_shares = {'client_ID': FEDSUBAVG_SELF_STORAGE['my_index'],\r\n 'live_b_shares': fedsubavg_live_b_shares,\r\n 'drop_s_shares': fedsubavg_drop_s_shares}\r\n communication.send_np_array(fedsubavg_shares, client_socket)\r\n print('Client %d sent secret shares of live and dropped clients in round 2 to server in secure federated submodel averaging'\\\r\n % FEDSUBAVG_SELF_STORAGE['my_index'])\r\n sys.stdout.flush()\r\n\r\n del fedsubavg_live_b_shares\r\n del fedsubavg_drop_s_shares", "def rmatmul(self, y: torch.Tensor) -> \"ShareTensor\":\n if isinstance(y, ShareTensor):\n raise ValueError(\"Private matmul not supported yet\")\n\n new_share = ShareTensor.apply_function(self, y, \"matmul\")\n return new_share", "def _gu_bilinear(self, h, r):\n mu1h = torch.matmul(self.mu1.weight, h.T) # [k, b]\n mu2r = torch.matmul(self.mu2.weight, r.T) # [k, b]\n return (mu1h * mu2r + self.bu.weight).T # [b, k]", "def duplicate(ctx, data):\n KERNEL = \"\"\"\n for (int j = 0; j < IN_BLOCK_SIZE; ++j) {\n b[OUT_BLOCK_SIZE*__id+j] = a[j];\n } \n \"\"\"\n mapper = Blockwise(ctx, map_expr=KERNEL, arguments=[\n ('a', 'global const', data.dtype, '*a'),\n ('b', 'global', data.dtype, '*b') \n ],\n in_blocksize=reduce(mul, data.shape),\n out_blocksize=reduce(mul, data.shape)\n )\n mapper.build()\n\n def _kernel(queue, length, 
b=None):\n if b is None:\n shape = [length] + list(data.shape[1:])\n b = cl.array.empty(queue, tuple(shape), data.dtype)\n\n mapper(queue, length, data.data, b.data)\n return b \n\n return _kernel", "def hash_function_multiplication(key, array_size, a):\n temp = a * key \n temp = temp - int(temp) \n\n return int(array_size * temp)", "def over(input_a, input_b):\n\n comp = input_b.duplicate()\n input_a.premult()\n ImageBufAlgo.over(comp, input_a, input_b)\n\n if comp.has_error:\n print \"Error merging over:\", comp.geterror()\n\n return comp", "def testMatchSwarpBilinearImage(self):\n self.compareToSwarp(\"bilinear\", useWarpExposure=False, atol=0.15)", "def SO4_circuit(a_alpha, a_theta, a_beta, b_alpha, b_theta, b_beta):\n # return np.kron(S1_inv, I2) @ np.kron(I2, S1_inv) @ np.kron(I2, R1_inv) @ CNOT2 \\\n # @ np.kron(I2, R_z(b_beta)) @ np.kron(I2, R_y(b_theta)) @ np.kron(I2, R_z(b_alpha)) \\\n # @ np.kron(R_z(a_beta), I2) @ np.kron(R_y(a_theta), I2) @ np.kron(R_z(a_alpha), I2) \\\n # @ CNOT2 @ np.kron(I2, R1) @ np.kron(I2, S1) @ np.kron(S1, I2)\n\n return np.linalg.inv(magic_gate) \\\n @ np.kron(I2, R_z(b_beta)) @ np.kron(I2, R_y(b_theta)) @ np.kron(I2, R_z(b_alpha)) \\\n @ np.kron(R_z(a_beta), I2) @ np.kron(R_y(a_theta), I2) @ np.kron(R_z(a_alpha), I2) \\\n @ magic_gate", "def shared_dataset_mask(data_x,data_y, data_z, borrow=True):\n\n shared_x = theano.shared(np.asarray(data_x,\n dtype='int32'),\n borrow=borrow)\n shared_y = theano.shared(np.asarray(data_y,\n dtype='int32'),\n borrow=borrow)\n shared_z = theano.shared(np.asarray(data_z,\n dtype='int32'),\n borrow=borrow)\n # When storing data on the GPU it has to be stored as floats\n # therefore we will store the labels as ``floatX`` as well\n # (``shared_y`` does exactly that). But during our computations\n # we need them as ints (we use labels as index, and if they are\n # floats it doesn't make sense) therefore instead of returning\n # ``shared_y`` we will have to cast it to int. 
This little hack\n # lets ous get around this issue\n\n return shared_x, shared_y,shared_z", "def tile_and_bind(s, tensor, y, x, y_factor, x_factor=None):\n x_factor = x_factor or y_factor\n yo, xo, yi, xi = s[tensor].tile(y, x, y_factor, x_factor)\n s[tensor].bind(xo, te.thread_axis(\"blockIdx.x\"))\n s[tensor].bind(xi, te.thread_axis(\"threadIdx.x\"))\n s[tensor].bind(yo, te.thread_axis(\"blockIdx.y\"))\n s[tensor].bind(yi, te.thread_axis(\"threadIdx.y\"))\n return yo, xo, yi, xi", "def block_inception_b(self,inputs, scope=None, reuse=None):\n # By default use stride=1 and SAME padding\n with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],\n stride=1, padding='SAME'):\n with tf.variable_scope(scope):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 112, [1, 3], scope='Conv2d_0b_1x7')\n branch_1 = slim.conv2d(branch_1, 64, [3, 1], scope='Conv2d_0c_7x1')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(inputs, 86, [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 86, [3, 1], scope='Conv2d_0b_7x1')\n branch_2 = slim.conv2d(branch_2, 112, [1, 3], scope='Conv2d_0c_1x7')\n branch_2 = slim.conv2d(branch_2, 112, [3, 1], scope='Conv2d_0d_7x1')\n branch_2 = slim.conv2d(branch_2, 128, [1, 3], scope='Conv2d_0e_1x7')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')\n return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])" ]
[ "0.5719756", "0.5430887", "0.542316", "0.5418885", "0.5392883", "0.5379249", "0.53384435", "0.5258141", "0.5237195", "0.5228597", "0.5221881", "0.52066815", "0.51239055", "0.5099972", "0.5070661", "0.5048621", "0.5037715", "0.5023207", "0.4981468", "0.4955277", "0.49488163", "0.49291778", "0.4915449", "0.4913465", "0.49111488", "0.48928064", "0.48864293", "0.48749807", "0.48550478", "0.4853642" ]
0.63490194
0
Adds a new client to the database
def add_client(name): return create_client(name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_01_add_client(self):\n try:\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n client.host_name = test_str[1]\n client.user_name = test_str[2]\n ClientsUnitTest._client_dao.add(client)\n self.assertTrue(\n ClientsUnitTest._client_dao.get_client(client.user_id))\n\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n self.assertTrue(ClientsUnitTest._client_dao.delete(client))\n\n except DBException as error:\n print(error.get_message())", "def add_client(self, client):\n\n now = int(time.time())\n\n self.send_line(\"%s N %s 1 %d %s %s +ik ]]]]]] %s :%s\" %\\\n (self.config[\"numeric\"], client.nick, now, client.user,\n client.host, client.uid, client.gecos))", "def add_client(self, cli):\n if self.clients.count(cli) is 0:\n self.clients.append(cli)", "def register(self, client):\n self.clients.append(client)", "def add_client(self, client=None, activate=False):\n is_client = type(client) is Client\n id_exists = client.client_id in [c.client_id for c in self.client_list]\n id_exists_inactive = (\n self.inactive_list.client_exists(client)\n if not activate\n else False\n )\n id_is_empty = client.client_id == ''\n\n # cancel if it's no client or the client_id already exists or empty\n if not is_client or id_exists or id_exists_inactive or id_is_empty:\n return False\n\n # append the client and save it immediately\n self.client_list.append(client)\n self.save_client_to_file(client=client)\n\n return True", "def add_user_to_db(new_profile):\n try:\n params = (new_profile.client_nickname,\n new_profile.client_username,\n new_profile.client_hostname,\n new_profile.client_port)\n client_db.execute(\"INSERT INTO clients VALUES (?, ?, ?, ?)\", params)\n client_detail_list.commit()\n client_detail_list.close()\n except:\n print('User already exists, try deleting the profile first.')", "def add_client(name, sort_code):\n\n sql = u'INSERT INTO client_company_TBL ' \\\n u'(name, sort_code) ' \\\n u'VALUES (%s, %s);'\n data = (name, sort_code)\n c.execute(sql, data)\n conn.commit()", "def create(ctx, name, company, mail, age):\n client = Client(name,company,mail,age)\n client_service = ClientService(ctx.obj['clients_table']) \n client_service.create_client(client)", "def create_client(name):\n client = Client(name=name)\n print(client.client_secret)\n db.session.add(client)\n db.session.commit()\n return client", "def register_client(self, client, client_name):\n self.clients[client_name] = client", "def insert(self, c: Client) -> int:\n\n if not hasattr(c, 'id'):\n c.id = self.__counter\n self.__counter += 1\n\n self.__clients[c.id] = c\n\n return c.id", "def add_client_company(self, client_id):\n\n sql = u'INSERT INTO client_com_link_jobs_TBL ' \\\n u'(job_ID_year, job_ID_number, client_company_ID) ' \\\n u'VALUES (%s, %s, %s);'\n\n data = (self.job_number_sql[0], self.job_number_sql[1], client_id)\n\n c, conn = connection(self.company_schema)\n\n try:\n c.execute(sql, data)\n\n finally:\n conn_close(c, conn)", "def add(self, klient):\n try:\n c = self.conn.cursor()\n # zapisz klienta\n ilosc = sum([item.ilosc*item.oprocentowanie for item in klient.lokaty])\n c.execute('INSERT INTO Klient (id, imie, nazwisko, ilosc) VALUES(?, ?, ?, ?)',\n (klient.id, klient.imie, klient.nazwisko, str(klient.ilosc))\n )\n # zapisz lokaty klienta\n if klient.lokaty:\n for lokata in klient.lokaty:\n try:\n c.execute('INSERT INTO Lokaty (nazwa, ilosc, oprocentowanie, klient_id) 
VALUES(?,?,?,?)',\n (lokata.nazwa, str(lokata.ilosc), str(lokata.oprocentowanie), klient.id)\n )\n except Exception as e:\n #print \"item add error:\", e\n raise RepositoryException('error adding klient item: %s, to klient: %s' %\n (str(lokata), str(klient.id))\n )\n except Exception as e:\n #print \"klient add error:\", e\n raise RepositoryException('error adding klient %s' % str(klient))", "def client(db):\n client = ClientFactory()\n db.session.commit()\n return client", "def addOne():\n print(inspect.stack()[1][3])\n # read data from the API call\n req_data = request.get_json()\n json_data = {}\n\n for req in req_data:\n if (req in Followup.c.keys()):\n json_data[req] = req_data[req]\n\n query = (\n insert(Followup).\n values(json_data)\n )\n ResultProxy = connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to Add the given client'}\n return {'status': \"Adding Succesful\"}", "def add(self):\n\n db.session.add(self)\n db.session.commit()", "def add(self):\n\n db.session.add(self)\n db.session.commit()", "def add_new_client(self, host, identifier):\n client = None\n\n try:\n client = self.get_client_by_info(host, identifier)\n\n logging.debug(\"Found client matching host '%s', uuid: '%s'\",\n host, client.uuid)\n\n except NoClientFoundError:\n logging.debug(\"No client matching '%s' (%s), creating a new one\", \n host, identifier)\n\n client = AuthenticatedClient(host, identifier)\n\n with self.lock:\n self.clients.append(client)\n\n logging.debug(\"Created client for '%s' ('%s'). uuid: %s, token: %s\", \n host, identifier, client.uuid, client.token)\n\n finally:\n return client", "def addClient(self, msg):\r\n guiControlClientId = msg[Messages.FIELD_GUI_CONTROL]\r\n if guiControlClientId != None:\r\n self.controllingClient.clear()\r\n self.controllingClient[guiControlClientId] = msg[Messages.FIELD_GUI_CONTROL_HOST]\r\n LOG(\"Set a new controlling client: \" + repr(guiControlClientId) + \" - \" +\r\n repr(self.controllingClient[guiControlClientId]))\r\n guiMonitoringClientId = msg[Messages.FIELD_GUI_LIST]\r\n # This list only contain one client reference\r\n if guiMonitoringClientId != None:\r\n self.monitoringClients[guiMonitoringClientId] = msg[Messages.FIELD_GUI_HOST_LIST]\r\n LOG(\"Added a new monitoring client: \" + repr(guiMonitoringClientId) + \" - \" +\r\n repr(self.monitoringClients[guiMonitoringClientId]))", "def add_customer(db_url: str):\n db_url = \"{}/{}\".format(db_url, \"user_api\")\n engine = create_engine(db_url, echo=True)\n session = sessionmaker(engine)()\n customer = Customer()\n session.add(customer)\n session.commit()\n return customer.id", "def create_client():\n result = False\n if g.client_id in drivers:\n result = True\n return jsonify({'Success': result})", "def add_client_coop(self, cli):\n if self.cooplist.count(cli) is 0:\n self.cooplist.append(cli)", "def create_clients(client_name): # Crear nuevo Cliente\n global clients\n\n if client_name not in clients:\n clients.append(client_name)\n else:\n print('The client name is alredy in the client\\'s list')", "def newClient(self, cid, **kwargs):\n client = Iourt42Client(console=self.console, cid=cid, timeAdd=self.console.time(), **kwargs)\n self[client.cid] = client\n self.resetIndex()\n\n self.console.debug('Urt42 Client Connected: [%s] %s - %s (%s)', self[client.cid].cid, self[client.cid].name,\n self[client.cid].guid, self[client.cid].data)\n\n self.console.queueEvent(self.console.getEvent('EVT_CLIENT_CONNECT', data=client, client=client))\n\n if client.guid:\n client.auth()\n elif 
not client.authed:\n self.authorizeClients()\n return client", "def violation(self):\n\n self.client.add_client()", "def createClient(self, name, wid, notes=None):\n\n data = {}\n data['client'] = {}\n data['client']['name'] = name\n data['client']['wid'] = wid\n data['client']['notes'] = notes\n\n response = self.postRequest(Endpoints.CLIENTS, parameters=data)\n return self.decodeJSON(response)", "def insertar(self, cliente):\n self.enfila+=1\n self.fila.append(cliente)", "def insertar(self, cliente):\n self.enfila+=1\n self.fila.append(cliente)", "def add_connection_entry(self,client_id, display_name,session_id,host,conn,addr):\n self.connections[client_id] = {\n \"display_name\" : display_name,\n \"session_id\" : session_id,\n \"host\" : host,\n \"CONN\" : conn,\n \"ADDR\" : addr,\n \"connected\" : True\n }", "def create_client(self) -> None:\n pass" ]
[ "0.7131107", "0.6994528", "0.6953803", "0.6861296", "0.6846199", "0.6807965", "0.66636294", "0.65601414", "0.6553862", "0.6495806", "0.6450804", "0.64192295", "0.6418755", "0.63998187", "0.6399166", "0.6305849", "0.6305849", "0.6294406", "0.62839854", "0.6116749", "0.606965", "0.60681456", "0.60446346", "0.6033178", "0.60327905", "0.59941727", "0.5937163", "0.5937163", "0.59004325", "0.5860346" ]
0.7155781
0
Let's public and registered user submit a support ticket
def support_submit_ticket(self, **kw): person_name = "" if http.request.env.user.name != "Public user": person_name = http.request.env.user.name pic = http.request.env['schedule.person.in.charge'].sudo().search([],order = 'end_date desc',limit=1).pic return http.request.render('website_support_indonesia.support_submit_ticket', {'categories': http.request.env['website.support.ticket.categories'].sudo().search([('type_view','=',False)]), 'person_name': person_name, 'email': http.request.env.user.email, 'pic':pic})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_support_ticket(self, admin_uid, project_name, customer_name,\n tmp_name, supporter_name,\n customer_profile='TMS Customer Profile'):\n support_ticket_obj = self.registry('tms.support.ticket')\n partner_obj = self.registry('res.partner')\n project_obj = self.registry('tms.project')\n user_obj = self.registry('res.users')\n group_obj = self.registry('res.groups')\n cr = self.cr\n # Find the existed profiles\n customer_profile_id = group_obj.search(\n cr, admin_uid, [('name', '=', customer_profile)]\n )[0]\n tpm_profile_id = group_obj.search(\n cr, admin_uid, [('name', '=', 'Technical Project Manager Profile')]\n )[0]\n fc_profile_id = group_obj.search(\n cr, admin_uid, [('name', '=', 'Functional Consultant Profile')]\n )[0]\n # create TPM user\n tpm_user_vals = {\n 'name': tmp_name,\n 'login': tmp_name,\n 'password': 'tpm',\n 'email': '%[email protected]' % tmp_name,\n 'group_profile_id': tpm_profile_id,\n 'is_trobz_member': True,\n }\n tpm_uid = user_obj.create(cr, admin_uid, tpm_user_vals)\n\n # Create support user\n fc_user_vals = {\n 'name': supporter_name,\n 'login': supporter_name,\n 'password': 'supporter',\n 'email': '%[email protected]' % supporter_name,\n 'group_profile_id': fc_profile_id,\n 'is_trobz_member': True,\n }\n fc_uid = user_obj.create(cr, admin_uid, fc_user_vals)\n\n # Create a Partner, it is:\n # - Customer on Project form\n # - Employer on User form\n # - Customer on Support ticket form\n customer_vals = {\n 'name': customer_name,\n 'is_company': True,\n 'website': '%s-fake.com' % customer_name\n }\n customer_id = partner_obj.create(cr, admin_uid, customer_vals)\n\n # TPM creates a project\n # required for creating support ticket\n project_vals = {\n 'name': project_name,\n 'partner_id': customer_id,\n 'technical_project_manager_id': tpm_uid,\n 'state': 'active',\n 'default_supporter_id': fc_uid,\n 'project_supporter_rel_ids': [(4, fc_uid), (4, tpm_uid)]\n }\n # Computing the supporters here avoids the access control\n # related to `res.partner`.\n project_id = project_obj.create(\n cr, admin_uid, project_vals\n )\n # Create customer user\n customer_user_vals = {\n 'name': customer_name,\n 'login': customer_name,\n 'password': 'customer',\n 'email': '%[email protected]' % customer_name,\n 'group_profile_id': customer_profile_id,\n 'is_trobz_member': False,\n 'supporter_of_project_ids': [(6, 0, [project_id])],\n 'employer_id': customer_id,\n }\n customer_uid = user_obj.create(cr, admin_uid, customer_user_vals)\n # Customer create a support ticket\n support_ticket_vals = {\n 'reporter_id': customer_uid,\n 'summary': 'Support ticket test',\n 'description': 'Support Ticket Test',\n 'state': 'assigned',\n 'ticket_type': 'unclassified',\n 'priority': 'normal',\n 'project_id': project_id,\n 'customer_id': customer_id,\n }\n support_ticket_id = support_ticket_obj.create(\n cr, customer_uid, support_ticket_vals,\n {'test_support_ticket': True}\n )\n return customer_uid, support_ticket_id", "async def ticket_add(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an 
admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n\n if user.id in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is already added.\")\n return\n\n adding_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if adding_is_admin:\n await ctx.send(\"You cannot add a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n return\n\n try:\n await channel.set_permissions(user, send_messages=True, read_messages=True)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. 
\"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].append(user.id)\n\n await ctx.send(f\"{user.mention} has been added to the ticket.\")", "async def submit(client, event,\n submission_reference_url: ('str', 'Please give a link to your submission'),\n ):\n if (event.guild is not None):\n return Embed('Error', 'Please use this channel in a private channel.')\n \n if not event.user.has_roole(ROLE__SUPPORT__VERIFIED):\n return Embed('Permission denied', f'You must have {ROLE__SUPPORT__VERIFIED.mention} role to invoke this '\n f'command.')\n \n if datetime.utcnow() >= QUALIFIER_DEADLINE:\n return Embed('Oh No!', 'Qualifier over', color = COLOR__EVENT)\n \n user = event.user\n await client.message_create(CHANNEL__SUPPORT__EVENT, f'{user:f}, [{user.id}] submitted:\\n'\n f'`{submission_reference_url}`')\n \n return Embed('Success', 'Noice', color = COLOR__EVENT)", "def ticket_created(self, ticket):", "def __create_ticket(user, subject, description, topic):\n\n target = settings.SLACK_TARGET_TFED\n if topic == 'Database':\n target = settings.SLACK_TARGET_TFED_DB\n user_email = user['user']['profile'].get('email', '[email protected]')\n display_name = user['user']['profile']['real_name']\n resp = rt_api.create_ticket(topic, user_email, subject, description + \"\\n\\n- \" + display_name)\n ticket_id = resp.get('id', None)\n if ticket_id:\n ticket_info = {\n \"url\": 'https://lnl-rt.wpi.edu/rt/Ticket/Display.html?id=' + ticket_id,\n \"id\": ticket_id,\n \"subject\": subject,\n \"description\": description,\n \"status\": \"New\",\n \"assignee\": None,\n \"reporter\": user['user']['name']\n }\n ticket = views.tfed_ticket(ticket_info)\n slack_post(target, text=description, content=ticket, username='Request Tracker')\n return\n error_message = \"Whoops! It appears something went wrong while attempting to submit your request. \" \\\n \"Please wait a few minutes then try again. 
If the problem persists, please email \" \\\n \"us directly at [email protected].\"\n post_ephemeral(target, error_message, user['user']['id'], username=\"Request Tracker\")", "def create_ticket(self, user):\n return Ticket.objects.create_ticket('test', user)", "def prepare_ticket(self, req, ticket, fields, actions):", "def _record_feedback_in_zendesk(realname, email, subject, details, tags, additional_info):\r\n zendesk_api = _ZendeskApi()\r\n\r\n additional_info_string = (\r\n \"Additional information:\\n\\n\" +\r\n \"\\n\".join(\"%s: %s\" % (key, value) for (key, value) in additional_info.items() if value is not None)\r\n )\r\n\r\n # Tag all issues with LMS to distinguish channel in Zendesk; requested by student support team\r\n zendesk_tags = list(tags.values()) + [\"LMS\"]\r\n new_ticket = {\r\n \"ticket\": {\r\n \"requester\": {\"name\": realname, \"email\": email},\r\n \"subject\": subject,\r\n \"comment\": {\"body\": details},\r\n \"tags\": zendesk_tags\r\n }\r\n }\r\n try:\r\n ticket_id = zendesk_api.create_ticket(new_ticket)\r\n except zendesk.ZendeskError as err:\r\n log.error(\"Error creating Zendesk ticket: %s\", str(err))\r\n return False\r\n\r\n # Additional information is provided as a private update so the information\r\n # is not visible to the user.\r\n ticket_update = {\"ticket\": {\"comment\": {\"public\": False, \"body\": additional_info_string}}}\r\n try:\r\n zendesk_api.update_ticket(ticket_id, ticket_update)\r\n except zendesk.ZendeskError as err:\r\n log.error(\"Error updating Zendesk ticket: %s\", str(err))\r\n # The update is not strictly necessary, so do not indicate failure to the user\r\n pass\r\n\r\n return True", "def submit_staff_ticket(request):\n if request.is_ajax and request.method == 'POST':\n data = dict()\n \n data['user'] = str(request.user.id)\n data['url'] = request.POST['url']\n data['message'] = request.POST['message']\n data['object_id'] = int(request.POST['object_id'])\n url =''\n for s in request.POST['url'].split('/')[3:]:\n url+='/'+s\n url_func_name = resolve(url)[0].__name__\n # get function name\n if 'minus' in url_func_name:\n ct = ContentType.objects.get(app_label = 'minusstore',\n model = 'minusrecord')\n data['content_type'] = str(ct.id)\n st = StaffTicketForm(data )\n if st.is_valid():\n st.save()\n\n \n return HttpResponse(\"ok\")\n else:\n raise Http404", "def test_credit_ticket_as_user(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.user)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_403_FORBIDDEN,\n )", "def issue_ticket(database, user):\n try:\n # check if user is an officer\n c = database.cursor()\n c.execute('SELECT utype FROM users WHERE uid = ?', (user, ))\n user_type = c.fetchone()[0]\n\n # If user is an officer \n if user_type == 'o':\n reg_num = int(input(\"Registration number: \"))\n c.execute(\"\"\"SELECT p.fname, p.lname, v.make, v.model, v.year, v.color FROM registrations r JOIN\n persons p ON (r.fname, r.lname) = (p.fname, p.lname) JOIN vehicles v ON r.vin = v.vin WHERE r.regno = ?\"\"\",(reg_num,))\n result = c.fetchone()\n fname = result[0]\n lname = result[1]\n make = result[2]\n model = result[3]\n year = result[4]\n color = result[5]\n print(\"\\n--------------------------\\nInformation\\n--------------------------\\n\")\n print(\"First 
Name: \", fname)\n print(\"Last Name: \", lname)\n print(\"Make: \", make)\n print(\"Model: \", model)\n print(\"Year: \", year)\n print(\"Color: \", color)\n\n print(\"\\n-------------------------\\nTicket the registra: \\n------------------------\\n\")\n violation_date = str(input(\"Violation Date: \")) # if not provided, today's date\n if violation_date == \"\":\n violation_date = datetime.today().strftime('%Y-%m-%d')\n violation_text = str(input(\"violation Text: \"))\n amount = str(input(\"Amount: \"))\n tno = randrange(1001, 9867699)\n\n c.execute(q.insert_into_tickets, (tno, reg_num, amount, violation_text, violation_date))\n\n database.commit()\n print(pm.all_done)\n # if user is not an officer\n else:\n print(pm.for_officers_only)\n sys.exit()\n except:\n print(pm.something_went_wrong)\n sys.exit()", "def submit(request):\n if not request.user.is_authenticated():\n return proceed(request)\n # If dev has already agreed, continue to next step.\n user = UserProfile.objects.get(pk=request.user.id)\n if not user.read_dev_agreement:\n return redirect('submit.app.terms')\n return manifest(request)", "async def create_ticket(self, member : Member, guild : Guild):\n licence_id = await servers.get_licence_id(guild.id)\n category : CategoryChannel = guild.get_channel(await self.categorys.get_category_id(licence_id))\n role = guild.get_role(await self.roles.get_role_id(licence_id))\n \n\n channel : TextChannel = await category.create_text_channel(f'ticket-{member.name}')\n\n overwrite_everyone = PermissionOverwrite()\n overwrite_everyone.send_messages = False\n overwrite_everyone.read_messages = False\n\n overwrite_member = PermissionOverwrite()\n overwrite_member.send_messages = True\n overwrite_member.read_messages = True\n\n\n everyone_role = guild.default_role\n\n await channel.set_permissions(target=everyone_role,overwrite=overwrite_everyone)\n await channel.set_permissions(target=member, overwrite=overwrite_everyone)\n await channel.set_permissions(target=role, overwrite=overwrite_member)\n await channel.send(content = member.mention + \" \" + role.mention)", "async def add(self, ctx, member: Member):\n await self.create_ticket(member,ctx.guild)\n embed : Embed = settings.get_ticket_panel_embed()\n embed.description = 'Ticket created with success!'\n embed.set_footer(text=embed.footer.text, icon_url=self.bot.user.avatar_url)\n await ctx.message.delete()\n await ctx.send(embed=embed)", "def add_ticket(self, user):\n profile = user.get_profile()\n if profile.available_tickets() <= 0:\n raise Exception(\"This user does not have any tickets to allocate.\")\n \n ticket = RaffleTicket(raffle_prize=self, user=user)\n ticket.save()", "def save(self, user):\n\n q = self.cleaned_data['queue']\n\n t = Ticket( title = self.cleaned_data['title'],\n submitter_email = self.cleaned_data['submitter_email'],\n account = self.cleaned_data['account'],\n created = datetime.now(),\n status = Ticket.OPEN_STATUS,\n queue = q,\n description = self.cleaned_data['body'],\n priority = self.cleaned_data['priority'],\n owner = self.cleaned_data['owner']\n )\n\n if HAS_TAG_SUPPORT:\n t.tags = self.cleaned_data['tags']\n\n if self.cleaned_data['assigned_to']:\n try:\n t.assigned_to = self.cleaned_data['assigned_to']\n except User.DoesNotExist:\n t.assigned_to = None\n t.save()\n\n f = FollowUp( ticket = t,\n title = _('Ticket Opened'),\n date = datetime.now(),\n public = False,\n comment = self.cleaned_data['body'],\n systemuser = user.account,\n )\n if self.cleaned_data['assigned_to']:\n f.title = _('Ticket Opened & 
Assigned to %(name)s') % {\n 'name': t.get_assigned_to\n }\n\n f.save()\n \n files = []\n if self.cleaned_data['attachment']:\n import mimetypes\n file = self.cleaned_data['attachment']\n filename = file.name.replace(' ', '_')\n a = Attachment(\n followup=f,\n filename=filename,\n mime_type=mimetypes.guess_type(filename)[0] or 'application/octet-stream',\n size=file.size,\n )\n a.file.save(file.name, file, save=False)\n a.save()\n \n if file.size < getattr(settings, 'MAX_EMAIL_ATTACHMENT_SIZE', 512000):\n # Only files smaller than 512kb (or as defined in \n # settings.MAX_EMAIL_ATTACHMENT_SIZE) are sent via email.\n files.append(a.file.path)\n\n context = {\n 'ticket': t,\n 'queue': q,\n 'comment': f.comment,\n }\n \n messages_sent_to = []\n\n if t.submitter_email:\n send_templated_mail(\n 'newticket_owner',\n context,\n recipients=t.submitter_email,\n sender=q.from_address,\n fail_silently=True,\n files=files,\n )\n messages_sent_to.append(t.submitter_email)\n\n #FIX USERSETTINGS\n #=======================================================================\n # if t.assigned_to and t.assigned_to != user and getattr(t.assigned_to.usersettings.settings, 'email_on_ticket_assign', False) and t.assigned_to.email and t.assigned_to.email not in messages_sent_to:\n # send_templated_mail(\n # 'assigned_to',\n # context,\n # recipients=t.assigned_to.email,\n # sender=q.from_address,\n # fail_silently=True,\n # files=files,\n # )\n # messages_sent_to.append(t.assigned_to.email)\n #=======================================================================\n\n if q.new_ticket_cc and q.new_ticket_cc not in messages_sent_to:\n send_templated_mail(\n 'newticket_cc',\n context,\n recipients=q.new_ticket_cc,\n sender=q.from_address,\n fail_silently=True,\n files=files,\n )\n messages_sent_to.append(q.new_ticket_cc)\n\n if q.updated_ticket_cc and q.updated_ticket_cc != q.new_ticket_cc and q.updated_ticket_cc not in messages_sent_to:\n send_templated_mail(\n 'newticket_cc',\n context,\n recipients=q.updated_ticket_cc,\n sender=q.from_address,\n fail_silently=True,\n files=files,\n )\n\n return t", "def ticket_submit_callback(self, data):\n self.output('staged order ticket submitted: %s' % repr(data))", "def test_sell_ticket_posted(self, *_):\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket info, should be redirected to / route\n self.type('#name_sell', \"t1\")\n self.type(\"#price_sell\", \"100\")\n self.type(\"#quantity_sell\", \"2\")\n self.type(\"#exp_date_sell\", \"20200921\")\n self.click('#submit-sell')\n self.assert_element(\"#welcome-header\")\n # Assert that the valid error message is shown.\n self.assert_text(\"Hi test_frontend\", \"#welcome-header\")", "def Ticket(ticket):\n try:\n data = ticket_module.verify(ticket)\n name = data['slivers'][0]['name']\n if data != None:\n deliver_ticket(data)\n logger.log('api_calls: Ticket delivered for %s' % name)\n Create(database.db.get(name))\n except Exception, err:\n raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))", "def save(self,owner=None):\n\n q = Queue.objects.get(id=int(self.cleaned_data['queue']))\n\n t = Ticket(\n title = self.cleaned_data['title'],\n owner=owner,\n submitter_email = 
self.cleaned_data['submitter_email'],\n created = datetime.now(),\n status = Ticket.OPEN_STATUS,\n queue = q,\n description = self.cleaned_data['body'],\n priority = self.cleaned_data['priority'],\n account = owner.account\n )\n\n t.save()\n\n f = FollowUp(\n ticket = t,\n title = _('Ticket Opened Via Web'),\n date = datetime.now(),\n public = True,\n comment = self.cleaned_data['body'],\n account = owner.account\n )\n\n f.save()\n\n files = []\n if self.cleaned_data['attachment']:\n import mimetypes\n file = self.cleaned_data['attachment']\n filename = file.name.replace(' ', '_')\n a = Attachment(\n followup=f,\n filename=filename,\n mime_type=mimetypes.guess_type(filename)[0] or 'application/octet-stream',\n size=file.size,\n )\n a.file.save(file.name, file, save=False)\n a.save()\n \n if file.size < getattr(settings, 'MAX_EMAIL_ATTACHMENT_SIZE', 512000):\n # Only files smaller than 512kb (or as defined in \n # settings.MAX_EMAIL_ATTACHMENT_SIZE) are sent via email.\n files.append(a.file.path)\n\n context = {\n 'ticket': t,\n 'queue': q,\n }\n\n messages_sent_to = []\n\n send_templated_mail(\n 'newticket_owner',\n context,\n recipients=t.submitter_email,\n sender=q.from_address,\n fail_silently=True,\n files=files,\n )\n messages_sent_to.append(t.submitter_email)\n\n if q.new_ticket_cc and q.new_ticket_cc not in messages_sent_to:\n send_templated_mail(\n 'newticket_cc',\n context,\n recipients=q.new_ticket_cc,\n sender=q.from_address,\n fail_silently=True,\n files=files,\n )\n messages_sent_to.append(q.new_ticket_cc)\n\n if q.updated_ticket_cc and q.updated_ticket_cc != q.new_ticket_cc and q.updated_ticket_cc not in messages_sent_to:\n send_templated_mail(\n 'newticket_cc',\n context,\n recipients=q.updated_ticket_cc,\n sender=q.from_address,\n fail_silently=True,\n files=files,\n )\n\n return t", "def AdminTicket(ticket):\n try:\n data, = xmlrpclib.loads(ticket)[0]\n name = data['slivers'][0]['name']\n if data != None:\n deliver_ticket(data)\n logger.log('api_calls: Admin Ticket delivered for %s' % name)\n Create(database.db.get(name))\n except Exception, err:\n raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))", "def requestSubmitted(request):", "def create_ticket(data):\n firebase_uid = data['session'].split('/')[-1]\n contexts = data['queryResult']['outputContexts']\n for i in contexts:\n if 'visit_data' in i['name']:\n context = i\n break\n\n date = datetime.datetime.now()\n date = date.strftime(\"%d-%m-%Y\")\n\n raw_params = context['parameters']\n ticket_params = {\n \"Agent\": \"None\",\n \"Product Type\": raw_params[\"product_type\"],\n \"Type\": \"House Call\",\n \"Issue Type\": raw_params[\"issue_type\"],\n \"Description\": raw_params[\"description\"],\n \"Model Number\": raw_params[\"model_number\"],\n \"Serial Number\": raw_params[\"serial_number\"],\n \"Status\": \"Open\",\n \"Date\": date,\n \"Time Slot Chosen\": \"0\",\n \"Time Slots\": {\"Slot 1\": {\"Time\": \"0\", \"Date\": \"0\"},\n \"Slot 2\": {\"Time\": \"0\", \"Date\": \"0\"},\n \"Slot 3\": {\"Time\": \"0\", \"Date\": \"0\"}},\n \"Progress\": \"Under Review\",\n \"Free Time\": {\n \"Date\": \"0\",\n \"Time\": \"0\",\n },\n \"Details of Call\": {\n \"Time\": \"0\",\n \"Date\": \"0\"}\n }\n ticket_id = str(uuid.uuid4())[:8]\n db = firebase.database()\n db.child(\n 'user_data').child(\n firebase_uid).child(\n 'Complaints').child(ticket_id).set(ticket_params)\n\n fulfillment_response = {\n \"fulfillmentText\":\n \"You ticket was successfully registered. The reference number is \" + ticket_id +\n \". 
Based on the availability of our agents, we will give you three time slots to choose from. You can \"\n \"either go to the \\\"Tickets\\\" section of the app and update your preference or do so by talking to me.\"}\n return fulfillment_response", "async def support(self, ctx):\n await ctx.send('Join the support server here: https://discord.gg/bAq8Ec5JPQ')", "def submit_feedback(request):\r\n if not settings.FEATURES.get('ENABLE_FEEDBACK_SUBMISSION', False):\r\n raise Http404()\r\n if request.method != \"POST\":\r\n return HttpResponseNotAllowed([\"POST\"])\r\n if (\r\n not settings.ZENDESK_URL or\r\n not settings.ZENDESK_USER or\r\n not settings.ZENDESK_API_KEY\r\n ):\r\n raise Exception(\"Zendesk enabled but not configured\")\r\n\r\n def build_error_response(status_code, field, err_msg):\r\n return HttpResponse(json.dumps({\"field\": field, \"error\": err_msg}), status=status_code)\r\n\r\n additional_info = {}\r\n\r\n required_fields = [\"subject\", \"details\"]\r\n if not request.user.is_authenticated():\r\n required_fields += [\"name\", \"email\"]\r\n required_field_errs = {\r\n \"subject\": \"Please provide a subject.\",\r\n \"details\": \"Please provide details.\",\r\n \"name\": \"Please provide your name.\",\r\n \"email\": \"Please provide a valid e-mail.\",\r\n }\r\n\r\n for field in required_fields:\r\n if field not in request.POST or not request.POST[field]:\r\n return build_error_response(400, field, required_field_errs[field])\r\n\r\n subject = request.POST[\"subject\"]\r\n details = request.POST[\"details\"]\r\n tags = dict(\r\n [(tag, request.POST[tag]) for tag in [\"issue_type\", \"course_id\"] if tag in request.POST]\r\n )\r\n\r\n if request.user.is_authenticated():\r\n realname = request.user.profile.name\r\n email = request.user.email\r\n additional_info[\"username\"] = request.user.username\r\n else:\r\n realname = request.POST[\"name\"]\r\n email = request.POST[\"email\"]\r\n try:\r\n validate_email(email)\r\n except ValidationError:\r\n return build_error_response(400, \"email\", required_field_errs[\"email\"])\r\n\r\n for header, pretty in [\r\n (\"HTTP_REFERER\", \"Page\"),\r\n (\"HTTP_USER_AGENT\", \"Browser\"),\r\n (\"REMOTE_ADDR\", \"Client IP\"),\r\n (\"SERVER_NAME\", \"Host\")\r\n ]:\r\n additional_info[pretty] = request.META.get(header)\r\n\r\n success = _record_feedback_in_zendesk(realname, email, subject, details, tags, additional_info)\r\n _record_feedback_in_datadog(tags)\r\n\r\n return HttpResponse(status=(200 if success else 500))", "async def ticket(self, ctx, ticketpanel_name: str):\n licence_id = servers.get_licence_id(ctx.guild.id)\n ticketpanel: Ticketpanel = await Ticketpanel.query.where(Ticketpanel.name == ticketpanel_name).where(Ticketpanel.licence_id == licence_id).gino.first()\n\n if not ticketpanel:\n embed: Embed = settings.get_ticket_error_embed()\n embed.description = f\"\\nTicketPanel called **{ticketpanel_name}** doesnt exist\\n\"\n embed.set_footer(text=embed.footer.text,\n icon_url=self.bot.user.avatar_url)\n await ctx.send(embed=embed)\n return\n\n embed : Embed = settings.get_ticket_panel_embed()\n embed.description = ticketpanel.description\n embed.set_footer(text=embed.footer.text,\n icon_url=self.bot.user.avatar_url)\n await ctx.message.delete()\n message = await ctx.send(embed=embed)\n await message.add_reaction(settings.get_ticket_create_emoji())", "def abrirTicket(self):\n return {\n \"type\": \"ir.actions.act_url\",\n \"url\": \"https://gnsys-corp.odoo.com/web#id= \" + str(self.ticket_id_existente) + \" 
&action=400&active_id=9&model=helpdesk.ticket&view_type=form&menu_id=406\",\n \"target\": \"new\",\n }", "def mailissue(request):\n if not request.issue.edit_allowed:\n if not IS_DEV:\n return HttpTextResponse('Login required', status=401)\n issue = request.issue\n msg = _make_message(request, issue, '', '', True)\n issue.put()\n msg.put()\n\n return HttpTextResponse('OK')", "def test_ticketSuccess(self, *_):\n # open logout page\n self.open(base_url + '/logout')\n # open login page\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", test_user.email)\n self.type(\"#password\", test_user.password)\n # click enter button\n self.click('input[type=\"submit\"]')\n \n # test if the page that loads is the home page and that it loads correctly\n self.assert_element(\"#welcome-header\")\n self.assert_text(\"Welcome \" + test_user.name, \"#welcome-header\")\n # test if the tickets show up\n self.assert_element(\"#tickets div h4\")\n self.assert_text(test_tickets[0].name + \" \" + test_tickets[0].owner, \"#tickets div h4\")\n self.assert_element(\"#tickets div h5\")\n self.assert_text(\"Quantity: \" + str(test_tickets[0].quantity) + \" Price: \" + str(test_tickets[0].price), \"#tickets div h5\")\n\n # change the test_tickets variable to reflect the updates to the first \n # ticket, so get_all_tickets will return the updated ticket information\n test_tickets[0].quantity = updated_ticket.quantity\n test_tickets[0].price = updated_ticket.price\n test_tickets[0].date = updated_ticket.date\n\n # fill in name, quantity, price and date in the update form\n self.type(\"#update_form form div #name\", updated_ticket.name)\n self.type(\"#update_form form div #quantity\", str(updated_ticket.quantity))\n self.type(\"#update_form form div #price\", str(updated_ticket.price))\n self.type(\"#update_form form div #date\", updated_ticket.date)\n self.click('#update_form form div input[type=\"submit\"]')\n\n # test if the page that loads is the home page and that it loads correctly\n self.assert_element(\"#welcome-header\")\n self.assert_text(\"Welcome \" + test_user.name, \"#welcome-header\")\n # test if the tickets show up\n self.assert_element(\"#tickets div h4\")\n self.assert_text(test_tickets[0].name + \" \" + test_tickets[0].owner, \"#tickets div h4\")\n self.assert_element(\"#tickets div h5\")\n self.assert_text(\"Quantity: \" + str(test_tickets[0].quantity) + \" Price: \" + str(test_tickets[0].price), \"#tickets div h5\")\n\n # logout\n self.open(base_url + '/logout')", "def raise_jira_ticket(obj,org_id):\n try:\n app_id = obj.get('app_id','') \n vul_name = obj.get('vul_name','')\n cwe = int(obj.get('cwe',0))\n project_key = obj.get('project_key','')\n issuetype = obj.get('issuetype','Bug')\n assignee = obj.get('assignee')\n app_obj = Application.objects.get(pk=app_id)\n if app_id and vul_name:\n vuls = Vulnerability.objects.filter(is_false_positive=False,is_remediated=False,scan__application=app_obj,cwe=cwe,name=vul_name)\n jira_obj = JiraIssueTypes.objects.get(org__id=org_id)\n jira = get_jira_con(jira_obj) \n if jira and vuls.exists(): \n complete_desc = ''\n references = '' \n if app_obj:\n complete_desc += 'Application:\\n{0}\\n\\n'.format(app_obj.name)\n complete_desc += 'Application URL:\\n{0}\\n\\n'.format(app_obj.url)\n if cwe:\n complete_desc += 'CWE :\\n{0}\\n\\n'.format(cwe)\n org_obj = app_obj.org\n if org_obj.orl_config_exists():\n vul_info = get_open_vul_info_from_api(cwe,org_obj)\n complete_desc += 
'Description:\\n{0}\\n\\n'.format(vul_info.get('description','')) \n if references:\n complete_desc += 'References:\\n{0}'.format(references) \n data_dict = {\n 'project':{'key':project_key },\n 'issuetype':{'name': issuetype},\n 'priority':{'name': 'Highest'},\n 'summary':vul_name,\n 'description':complete_desc, \n } \n new_issue = jira.create_issue(**data_dict) \n evids = VulnerabilityEvidence.objects.filter(vul__in=vuls) \n attachment = io.StringIO()\n attachment.write('Evidences') \n for evid in evids:\n data = '\\n\\t- {0}\\n\\t\\t- {1}'.format(evid.url,evid.name)\n attachment.write(data) \n jira.add_attachment(issue=new_issue, attachment=attachment, filename='evidences.txt') \n vuls.update(jira_id=str(new_issue),jira_issue_status=str(new_issue.fields.status))\n info_debug_log(event='Raise Jira ticket',status='success')\n if assignee:\n jira.assign_issue(new_issue,assignee)\n info_debug_log(event='Assign Jira ticket to an assignee',status='success')\n except BaseException as e:\n print(\"Error raising JIRA tickets\")\n # general_error_messages.delay(path='raise_jira_ticket function',msg=log_exception(e))\n critical_debug_log(event=e,status='failure')" ]
[ "0.69233453", "0.65590894", "0.65289736", "0.6415005", "0.64081705", "0.638714", "0.6355518", "0.62974226", "0.62485695", "0.624069", "0.6219356", "0.6219195", "0.62152666", "0.62112", "0.6184423", "0.61602175", "0.61165065", "0.6109556", "0.6039671", "0.60171163", "0.6001981", "0.59918875", "0.59508246", "0.59425175", "0.59366167", "0.5921043", "0.5907781", "0.5859897", "0.58525133", "0.58404714" ]
0.77420163
0
Create a new session and persist it according to its username and token values.
def _new_session(self, username_key=None, **attributes): for key in ['username', 'token', 'tenant_id']: if attributes.get(key, None) is None: attributes[key] = key + "_" + text_type(uuid4()) if 'expires' not in attributes: attributes['expires'] = ( datetime.utcfromtimestamp(self._clock.seconds()) + timedelta(days=1) ) session = Session(**attributes) if username_key is None: username_key = session.username self._username_to_token[username_key] = session.token self._token_to_session[session.token] = session self._tenant_to_token[session.tenant_id] = session.token return session
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_new_session_token(self):\n session_token = self.__generate_session_token()\n payload = {\n 'token' : session_token\n }\n self.encoded_token = jwt.encode(payload, 'secret', algorithm='HS256')\n Token.objects.create(session_token=session_token)", "def create_session(self):\n self._session = self.create_scoped_session()\n self.session = self._session()", "def create_new_session(self, username):\n return self.session_mgr.create_new_session(username)", "def create_session(self, username, password, expires_at=None):\n\n session = self.user_manager.create_user_session(\n username=username, password=password, expires_at=expires_at\n )\n\n if not session:\n return None, \"Invalid credentials\"\n\n return {\n 'session_token': session.session_token,\n 'user_id': session.user_id,\n 'created_at': session.created_at.isoformat(),\n 'expires_at': session.expires_at.isoformat()\n if session.expires_at\n else None,\n }", "def create_session(self, **params):\n raise NotImplementedError('Should be implemented by a sub-class.')", "def create_session(self,session_id,host_id,host_name,spotify_token):\n self.sessions[session_id] = {\n \"HOST\" : {\n \"ID\" : host_id,\n \"NAME\" : host_name,\n \"spotify_token\" : spotify_token,\n \"spotify_player\": None,\n },\n \"queue\" : [],\n \"queue_lock\" : False,\n \"current_track\" : \"\",\n \"previous_track\" : \"\",\n \"USERS\" : {}\n }", "async def create(self, session, *, dc=None):\n response = await self._api.put(\n \"/v1/session/create\",\n data=session,\n params={\"dc\": dc})\n return response.body", "def create_session():\n app = Application.query.filter_by(id=request.json['app']).first()\n questionnaire = Questionnaire.query.filter_by(id=request.json['questionnaire']).first()\n expected_emotions = request.json['expected_emotions']\n\n # validate application type\n if not app:\n return {'status': 'error', 'message': 'Invalid application.'}, 400\n\n new_session = Session(app=app, expected_emotions=expected_emotions, questionnaire=questionnaire)\n\n db.session.add(new_session)\n db.session.commit()\n\n result = session_schema.dump(new_session).data\n\n return jsonify({'status': 'success', 'message': 'Created new session for application with id of {}.'.format(request.json['app']), 'data': result}), 201", "def create(self, user, token):\n\n session['user'] = {\n 'id': str(user.id),\n 'login': user.login,\n 'token': token\n }\n\n return UserSession.create(session['user'])", "def create_new_session(self) -> None:\n try:\n session = self.client.create_session()\n logger.info(\"created session: %s\", session.id)\n self.join_session(session.id)\n location_config = self.app.guiconfig.location\n self.session.location = SessionLocation(\n x=location_config.x,\n y=location_config.y,\n z=location_config.z,\n lat=location_config.lat,\n lon=location_config.lon,\n alt=location_config.alt,\n scale=location_config.scale,\n )\n except grpc.RpcError as e:\n self.app.show_grpc_exception(\"New Session Error\", e)", "def make_new_session():\n session = Session.objects.create(uuid=str(uuid4()), container_id=None)\n return session.id", "def create_authed_session(self, username, password):\n login_page = self.browser.get('https://www.predictit.org/')\n login_form = login_page.soup.find('form', id='loginForm')\n login_form.select('#Email')[0]['value'] = username\n login_form.select('#Password')[0]['value'] = password\n self.browser.submit(login_form, login_page.url)\n return self.browser", "def create_session(self, user_id, **kwargs):\n defaults = {\n 'created_at': 
time.time()\n }\n defaults.update(kwargs)\n self.save_session(user_id, defaults)\n if self.max_session_length:\n self.schedule_session_expiry(user_id, self.max_session_length)\n return self.load_session(user_id)", "def post(self, args):\n\n response = openvidu().post_session(args)\n\n if response.status_code == 200:\n session = response.json()\n current_app.logger.info(f'Created new session `{session[\"id\"]}`')\n\n # Store session parameters in database to recreate it if necessary\n db = current_app.session\n db.add(Session(id=session[\"id\"], parameters=args))\n db.commit()\n return session\n elif response.status_code == 400:\n abort(UnprocessableEntity, json=response.json().get(\"message\"))\n abort(response)", "async def session(self, request):\n body = await api_validate(SCHEMA_SESSION, request)\n self._check_password(body)\n\n # check TOTP\n if self.config.security_totp:\n totp = pyotp.TOTP(self.config.security_totp)\n if body[ATTR_TOTP] != totp.now():\n raise RuntimeError(\"Invalid TOTP token!\")\n\n # create session\n valid_until = datetime.now() + timedelta(days=1)\n session = hashlib.sha256(os.urandom(54)).hexdigest()\n\n # store session\n self.config.add_security_session(session, valid_until)\n return {ATTR_SESSION: session}", "def _create_login_session(self):\r\n sess = requests.Session()\r\n r = sess.get(self.page(self.LOGIN_PAGE), verify=self.verify)\r\n if r.status_code == 200:\r\n csrf_token = EndaceWebSession.find_csrf_token_login(r.content)\r\n if csrf_token is None:\r\n raise Exception(\"Could not find CSRF token\")\r\n # Submit login form\r\n login_result = sess.post(self.page(self.LOGIN_ACTION),\r\n data={\r\n \"_csrf\": csrf_token,\r\n \"d_user_id\": \"user_id\",\r\n \"t_user_id\": \"string\",\r\n \"c_user_id\": \"string\",\r\n \"e_user_id\": \"true\",\r\n \"f_user_id\": str(self.username),\r\n \"f_password\": str(self.password),\r\n \"Login\": \"Login\"},\r\n headers={'Content-type': 'application/x-www-form-urlencoded'}\r\n )\r\n if login_result.status_code == 200 and len(sess.cookies) > 0:\r\n return sess\r\n else:\r\n raise Exception(\"Login failed\")\r\n else:\r\n raise Exception(\"Login failed\")", "def create_session(self, session_expiration_datetime=None):\n session_expiration_datetime = session_expiration_datetime or datetime.now() + timedelta(seconds=5)\n session = JOHN | dict(session_id=\"5\", session_expiration_datetime=session_expiration_datetime)\n self.database.sessions.find_one.return_value = session", "def save_session(self, session):\n db = self.open()\n db[session.id] = session", "def _create_session(self):\n self.session = requests.Session() # pragma: no cover\n self.session.headers[\"Accept\"] = \"application/json\" # pragma: no cover\n if self.user: # pragma: no cover\n self.session.auth = (self.user, self.cred) # pragma: no cover", "def perform_session_create(self, environ: str, session_parameters: dict) -> Session:\n session_parameters[\"mounts\"] = []\n attach_context = self.client.start_session(environ, session_parameters)\n\n # TODO should we record some of the request\n # headers e.g. 
`REMOTE_ADDR`, `HTTP_USER_AGENT`, `HTTP_REFERER` for analytics?\n\n return Session.objects.create(\n project=self.project,\n url=attach_context.url,\n execution_id=attach_context.execution_id,\n client_class_id=self.client.class_id,\n )", "def test_new_session_create_with_auth_json(self):\n\n with self.app_sess1 as c:\n data = {\n \"token\": \"pretend_token\"\n }\n ret1 = c.post('/', data=json.dumps(data), headers={'Content-Type': 'application/json'})\n ret2 = c.get('/', headers={'X-Auth-Token': 'pretend_token'})\n\n self.assertEqual(ret1.data, ret2.data)", "def login(self):\n backend = self.backend\n self.session[backend.session_id_key] = self[\"id\"]\n self.session[backend.session_backend_key] = backend.session_backend_val\n self.session[backend.session_hash_key] = self._get_session_hash(\n self[\"password\"]\n )", "def _create_user_session(url: str, netid: str = 'superuser', new: bool = False, add_to_os: bool = True):\n\n # Create requests session\n session = requests.session()\n\n if new:\n netid = create_user(netid, add_to_os)\n\n session.get(url + f'/admin/auth/token/{netid}')\n r = session.get(url + \"/public/auth/whoami\")\n\n try:\n assert r.status_code == 200\n data = r.json()\n assert data[\"success\"] is True\n assert data[\"data\"] is not None\n assert data[\"error\"] is None\n data = copy.deepcopy(data)\n admin_for = data['data']['user']['admin_for']\n for i in admin_for:\n if i['name'] == 'Intro to OS':\n session.cookies['course'] = base64.urlsafe_b64encode(json.dumps(i).encode()).decode()\n except AssertionError as e:\n print_full_error(e, r)\n return session, netid", "def addsession(cls, session, username, passwd):\n sessionkey = cls.sessionkey(session)\n tmpdict = dict({'username': username, 'password': passwd})\n sessionmgr.update(dict({sessionkey: tmpdict}))", "def create_session(self):\n\t\ttry:\n\t\t\tself.session = session.DropboxSession(APP_KEY, APP_SECRET, ACCESS_TYPE)\n\t\texcept Exception, e:\n\t\t\tlogger.error('Exception at create_session')\n\t\t\tlogger.debug('*' + sys.exc_info()[0])", "def create(id = None, expires=None):\n\n\t# Init the data\n\tdData = {}\n\n\t# If we have an expires time\n\tif expires:\n\t\tdData['__expire'] = expires\n\n\t# Create a new Session using a UUID as the id\n\treturn _Session(id and id or uuid.uuid4().hex, dData)", "def auth_create_session(self) -> str:\n self.__logger.debug('Eva.auth_create_session called')\n return self.__http_client.auth_create_session()", "def create_session(hostname, username, password):\n return slycat.web.server.remote.create_session(hostname, username, password, None)", "def _session(self):\n if self.session is None:\n self.session = create_session(self.config, self.auth)\n return self.session", "def _insert_new_session():\n request = self._make_request()\n session_existing = self._set_up_session_in_Redis_and_makeOne( # noqa: F841\n request, session_id, session_dict={\"visited\": True}, **session_args\n )\n return request" ]
[ "0.7512262", "0.7276753", "0.72110564", "0.7114964", "0.70534384", "0.700323", "0.69665575", "0.6964039", "0.6911601", "0.68732077", "0.6832288", "0.6659833", "0.66411835", "0.66335", "0.65931505", "0.65842974", "0.6561036", "0.6559232", "0.65558994", "0.6549769", "0.65234375", "0.6514132", "0.6512482", "0.6501336", "0.64953685", "0.6469691", "0.64640224", "0.6462134", "0.6431204", "0.6402201" ]
0.74275583
1
Given the name of a region and a mimic internal service ID, get a resource for that service.
def service_with_region(self, region_name, service_id, base_uri): key = (region_name, service_id) if key in self.uri_prefixes: return self.uri_prefixes[key].resource_for_region( self.uri_for_service(region_name, service_id, base_uri))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_region(self, region_id):\n raise exception.NotImplemented() # pragma: no cover", "def GetResourceFromNamespacedName(namespaced_name, resource_type):\n service = ServiceFns[resource_type]()\n req = GetResourceFns[resource_type](name=namespaced_name)\n response = service.Get(req)\n\n return response", "def get_region(self, region, namespace, region_id, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/region/{0}', region, *[region_id], **filters)", "def _get_resource(\n session: Optional[boto3.Session] = None, region: Optional[str] = None\n) -> S3ServiceResource:\n return (\n session.resource(\"s3\") if session else boto3.resource(\"s3\", region_name=region)\n )", "def get_region_by_name(self, name):\n raise NotImplementedError()", "def get_region_id(region_name):\n region_id = None\n all_region = api_get('region')\n if all_region.get('status') == 200:\n region_data = all_region.get('result')\n for region in region_data:\n if region_data[region].get('name') == region_name:\n region_id = region_data[region].get('DCID')\n return region_id", "def get_operation(project_id: str, region: str, operation_id: str) -> Operation:\n return get_operation_by_name(\n f\"projects/{project_id}/locations/{region}/operations/{operation_id}\"\n )", "def _get_client(self, region_name):\n if region_name not in self._clients[self.SERVICE_NAME]:\n self._clients[self.SERVICE_NAME][region_name] = self._create_client()\n\n return self._clients[self.SERVICE_NAME][region_name]", "def get_service(self, service_id):\n raise exception.NotImplemented() # pragma: no cover", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n additional_locations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceAdditionalLocationArgs']]]]] = None,\n certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCertificateArgs']]]]] = None,\n client_certificate_enabled: Optional[pulumi.Input[bool]] = None,\n delegation: Optional[pulumi.Input[pulumi.InputType['ServiceDelegationArgs']]] = None,\n developer_portal_url: Optional[pulumi.Input[str]] = None,\n gateway_disabled: Optional[pulumi.Input[bool]] = None,\n gateway_regional_url: Optional[pulumi.Input[str]] = None,\n gateway_url: Optional[pulumi.Input[str]] = None,\n hostname_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceHostnameConfigurationArgs']]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['ServiceIdentityArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n management_api_url: Optional[pulumi.Input[str]] = None,\n min_api_version: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n notification_sender_email: Optional[pulumi.Input[str]] = None,\n policy: Optional[pulumi.Input[pulumi.InputType['ServicePolicyArgs']]] = None,\n portal_url: Optional[pulumi.Input[str]] = None,\n private_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n protocols: Optional[pulumi.Input[pulumi.InputType['ServiceProtocolsArgs']]] = None,\n public_ip_address_id: Optional[pulumi.Input[str]] = None,\n public_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n publisher_email: Optional[pulumi.Input[str]] = None,\n publisher_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n scm_url: Optional[pulumi.Input[str]] = None,\n security: 
Optional[pulumi.Input[pulumi.InputType['ServiceSecurityArgs']]] = None,\n sign_in: Optional[pulumi.Input[pulumi.InputType['ServiceSignInArgs']]] = None,\n sign_up: Optional[pulumi.Input[pulumi.InputType['ServiceSignUpArgs']]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tenant_access: Optional[pulumi.Input[pulumi.InputType['ServiceTenantAccessArgs']]] = None,\n virtual_network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceVirtualNetworkConfigurationArgs']]] = None,\n virtual_network_type: Optional[pulumi.Input[str]] = None,\n zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ServiceState.__new__(_ServiceState)\n\n __props__.__dict__[\"additional_locations\"] = additional_locations\n __props__.__dict__[\"certificates\"] = certificates\n __props__.__dict__[\"client_certificate_enabled\"] = client_certificate_enabled\n __props__.__dict__[\"delegation\"] = delegation\n __props__.__dict__[\"developer_portal_url\"] = developer_portal_url\n __props__.__dict__[\"gateway_disabled\"] = gateway_disabled\n __props__.__dict__[\"gateway_regional_url\"] = gateway_regional_url\n __props__.__dict__[\"gateway_url\"] = gateway_url\n __props__.__dict__[\"hostname_configuration\"] = hostname_configuration\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"management_api_url\"] = management_api_url\n __props__.__dict__[\"min_api_version\"] = min_api_version\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"notification_sender_email\"] = notification_sender_email\n __props__.__dict__[\"policy\"] = policy\n __props__.__dict__[\"portal_url\"] = portal_url\n __props__.__dict__[\"private_ip_addresses\"] = private_ip_addresses\n __props__.__dict__[\"protocols\"] = protocols\n __props__.__dict__[\"public_ip_address_id\"] = public_ip_address_id\n __props__.__dict__[\"public_ip_addresses\"] = public_ip_addresses\n __props__.__dict__[\"public_network_access_enabled\"] = public_network_access_enabled\n __props__.__dict__[\"publisher_email\"] = publisher_email\n __props__.__dict__[\"publisher_name\"] = publisher_name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"scm_url\"] = scm_url\n __props__.__dict__[\"security\"] = security\n __props__.__dict__[\"sign_in\"] = sign_in\n __props__.__dict__[\"sign_up\"] = sign_up\n __props__.__dict__[\"sku_name\"] = sku_name\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tenant_access\"] = tenant_access\n __props__.__dict__[\"virtual_network_configuration\"] = virtual_network_configuration\n __props__.__dict__[\"virtual_network_type\"] = virtual_network_type\n __props__.__dict__[\"zones\"] = zones\n return Service(resource_name, opts=opts, __props__=__props__)", "def _get_service(self, service_name):\n if self._service:\n return self._service\n res = self._cc.services().get_by_name(service_name, name='label')\n self._service = res.resource\n return self._service", "def get_resource_from_name(name):\n return _name_to_resources.get(name, None)", "def get_resource(self, service_name, resource_name, base_class=None):\n classpath = self.build_classpath(base_class)\n service = self.services.get(service_name, {})\n resources = service.get('resources', {})\n resource_options = resources.get(resource_name, {})\n resource_class = 
resource_options.get(classpath, None)\n\n if not resource_class:\n msg = \"Resource '{0}' for {1} is not present in the cache.\"\n raise NotCached(msg.format(\n resource_name,\n service_name\n ))\n\n return resource_class", "def uri_for_service(self, region, service_id, base_uri):\n return str(URLPath.fromString(base_uri)\n .child(\"service\").child(region).child(service_id).child(\"\"))", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auto_scaling_configuration_arn: Optional[pulumi.Input[str]] = None,\n encryption_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceEncryptionConfigurationArgs']]] = None,\n health_check_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceHealthCheckConfigurationArgs']]] = None,\n instance_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceInstanceConfigurationArgs']]] = None,\n network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceNetworkConfigurationArgs']]] = None,\n observability_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceObservabilityConfigurationArgs']]] = None,\n service_id: Optional[pulumi.Input[str]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n service_url: Optional[pulumi.Input[str]] = None,\n source_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceSourceConfigurationArgs']]] = None,\n status: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ServiceState.__new__(_ServiceState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auto_scaling_configuration_arn\"] = auto_scaling_configuration_arn\n __props__.__dict__[\"encryption_configuration\"] = encryption_configuration\n __props__.__dict__[\"health_check_configuration\"] = health_check_configuration\n __props__.__dict__[\"instance_configuration\"] = instance_configuration\n __props__.__dict__[\"network_configuration\"] = network_configuration\n __props__.__dict__[\"observability_configuration\"] = observability_configuration\n __props__.__dict__[\"service_id\"] = service_id\n __props__.__dict__[\"service_name\"] = service_name\n __props__.__dict__[\"service_url\"] = service_url\n __props__.__dict__[\"source_configuration\"] = source_configuration\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n return Service(resource_name, opts=opts, __props__=__props__)", "def get_service(\n service_name: str,\n version: str = \"v1\",\n configuration: Configuration = None,\n secrets: Secrets = None,\n) -> Resource:\n return client(service_name, version=version, secrets=secrets)", "def client(service_name, region_name=None):\n return session.client(service_name=service_name, region_name=region_name)", "def cognitive_service_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cognitive_service_resource_id\")", "def cognitive_service_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cognitive_service_resource_id\")", "def cognitive_service_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cognitive_service_resource_id\")", "def _get_resource_provider(self, uuid):\n resp = self.get(\"/resource_providers/%s\" % uuid)\n 
if resp.status_code == 200:\n data = resp.json()\n return objects.ResourceProvider(\n uuid=uuid,\n name=data['name'],\n generation=data['generation'],\n )\n elif resp.status_code == 404:\n return None\n else:\n msg = _LE(\"Failed to retrieve resource provider record from \"\n \"placement API for UUID %(uuid)s. \"\n \"Got %(status_code)d: %(err_text)s.\")\n args = {\n 'uuid': uuid,\n 'status_code': resp.status_code,\n 'err_text': resp.text,\n }\n LOG.error(msg, args)", "def _get_resource_depending_on_node_name(self, node_name: str) -> GCPResource:\n return self.resources[GCPNodeType.name_to_type(node_name)]", "def resource_by_id(resource_type, resource_id, methods=[\"GET\"]):\n token = validate_auth()\n url = f\"{current_app.config.get('MAP_API')}{resource_type}/{resource_id}\"\n resp = requests.get(url, auth=BearerAuth(token))\n try:\n resp.raise_for_status()\n except requests.exceptions.HTTPError as err:\n abort(err.response.status_code, err)\n\n return jsonify(resp.json())", "def get_client(access_key, secret_key, region='eu-west-1', service='ec2'):\n return boto3.client(\n service,\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n region_name=region\n )", "def getService(name):\n return Service.getService(name)", "def __init__(__self__, *,\n cognitive_service_region: Optional[pulumi.Input[str]] = None,\n cognitive_service_resource_id: Optional[pulumi.Input[str]] = None,\n cognitive_service_subscription_key: Optional[pulumi.Input[str]] = None,\n default_locale: Optional[pulumi.Input[str]] = None,\n id: Optional[pulumi.Input[str]] = None,\n provider_name: Optional[pulumi.Input[str]] = None):\n if cognitive_service_region is not None:\n pulumi.set(__self__, \"cognitive_service_region\", cognitive_service_region)\n if cognitive_service_resource_id is not None:\n pulumi.set(__self__, \"cognitive_service_resource_id\", cognitive_service_resource_id)\n if cognitive_service_subscription_key is not None:\n pulumi.set(__self__, \"cognitive_service_subscription_key\", cognitive_service_subscription_key)\n if default_locale is not None:\n pulumi.set(__self__, \"default_locale\", default_locale)\n if id is not None:\n pulumi.set(__self__, \"id\", id)\n if provider_name is not None:\n pulumi.set(__self__, \"provider_name\", provider_name)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ServiceArgs.__new__(ServiceArgs)\n\n __props__.__dict__[\"correlation_scheme\"] = None\n __props__.__dict__[\"default_move_cost\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"partition_description\"] = None\n __props__.__dict__[\"placement_constraints\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"service_dns_name\"] = None\n __props__.__dict__[\"service_kind\"] = None\n __props__.__dict__[\"service_load_metrics\"] = None\n __props__.__dict__[\"service_package_activation_mode\"] = None\n __props__.__dict__[\"service_placement_policies\"] = None\n __props__.__dict__[\"service_type_name\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n return Service(resource_name, opts=opts, __props__=__props__)", "def get_resource(self, name: str) -> ResourceBase:\n resource = self.get_payload(name)\n if not isinstance(resource, 
ResourceBase):\n raise TypeError(\"Resource was expected but not found\")\n return resource", "def get_object(cls, api_token, name):\n region = cls(token=api_token, name=name)\n region.load()\n return region", "def get_service(self, project_id, service_id):\n return self.storage_controller.get_service(project_id, service_id)" ]
[ "0.6456018", "0.63319135", "0.617736", "0.6104381", "0.5876853", "0.5802578", "0.5799496", "0.5796975", "0.577786", "0.57154256", "0.569162", "0.56586933", "0.5637189", "0.56066823", "0.55764836", "0.5558472", "0.5557971", "0.55479217", "0.55479217", "0.55479217", "0.55396986", "0.5523898", "0.5522201", "0.5510637", "0.54962796", "0.5481744", "0.5473578", "0.5452595", "0.54419947", "0.5435245" ]
0.7848388
0
Generate a URI prefix for a given region and service ID.
def uri_for_service(self, region, service_id, base_uri): return str(URLPath.fromString(base_uri) .child("service").child(region).child(service_id).child(""))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def service_with_region(self, region_name, service_id, base_uri):\n key = (region_name, service_id)\n if key in self.uri_prefixes:\n return self.uri_prefixes[key].resource_for_region(\n self.uri_for_service(region_name, service_id, base_uri))", "def generate_router_uri(project_id, region, router_name):\n return 'projects/{}/regions/{}/routers/{}'.format(\n project_id,\n region,\n router_name\n )", "def get_url_prefix(self, style: str = None) -> str:\n if not self.url_prefix:\n return \"\"\n major, minor, build = self.api_version.split('.')\n prefix = self.url_prefix.format(\n api_version=self.api_version,\n api_major=major,\n api_minor=minor,\n api_build=build\n ).strip('/')\n style = (style or '').lower()\n if style == 'django':\n # Django requires the slash at the end\n return rf\"^{prefix}/\"\n elif style in ('flask', 'starlette'):\n # Flask and Starlette need the slash in front\n return f\"/{prefix}\"\n # If no format is specified, return only the bare prefix\n return prefix", "def get_region_url(project_id, region):\n assert is_valid_project_id(project_id), project_id\n assert is_valid_region(region), region\n return 'https://www.googleapis.com/compute/v1/projects/%s/regions/%s' % (\n project_id, region)", "def generate_uri(uri):\n return uri[:-5] + uuid.uuid4().hex", "def _getPrefix(self, namespaceURI):\r\n prefixDict = self._getPrefixDict()\r\n if prefixDict.has_key(namespaceURI):\r\n prefix = prefixDict[namespaceURI]\r\n else:\r\n prefix = 'ns1'\r\n while prefix in prefixDict.values():\r\n prefix = 'ns%d' %int(prefix[-1]) + 1\r\n prefixDict[namespaceURI] = prefix\r\n return prefix", "def new_url(**kwargs):\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/gslb/zone/{zone_name}/service/{service-port}+{service-name}\"\n f_dict = {}\n f_dict[\"service-port\"] = \"\"\n f_dict[\"service-name\"] = \"\"\n f_dict[\"zone_name\"] = kwargs[\"zone_name\"]\n\n return url_base.format(**f_dict)", "def existing_url(**kwargs):\n # Build the format dictionary\n url_base = \"/axapi/v3/gslb/zone/{zone_name}/service/{service-port}+{service-name}\"\n f_dict = {}\n f_dict[\"service-port\"] = kwargs[\"service-port\"]\n f_dict[\"service-name\"] = kwargs[\"service-name\"]\n f_dict[\"zone_name\"] = kwargs[\"zone_name\"]\n\n return url_base.format(**f_dict)", "def prefix_id(self, name):\n if \":\" in name: return name\n return self.prefix + \":\" + name", "def build_endpoint_prefix(self):\n if not sanity.validate_api_hostname(self.api_host):\n error_message = \"Bad API hostname: %s\" % self.api_host\n raise CloudPassageValidation(error_message)\n prefix = \"https://\" + self.api_host + \":\" + str(self.api_port)\n return prefix", "def _form_service_key(self, service_name, service_addr):\n return '/'.join((service_name, service_addr))", "def _uriPrefix(element):\n i = element.tag.find('}')\n if i < 0:\n return \"\"\n return element.tag[:i+1]", "def get_namespace_str(namespace: Namespace, region: Region):\n\n return namespace.value.format(region.value)", "def generate_cali_interface_name(prefix, ep_id):\n if len(prefix) > 4:\n raise ValueError('Prefix must be 4 characters or less.')\n return prefix + ep_id[:11]", "def base_url(klass, space_id=None, **kwargs):\n\n if space_id is None:\n space_id = ''\n return \"spaces/{0}\".format(space_id)", "def make_id_path(base_path, id_) -> Path:\n\n return base_path / (ID_FMT.format(id=id_))", "def add_agr_prefix_by_species_taxon(identifier, taxon_id):\n species_dict = {\n 7955: 'ZFIN:',\n 6239: 'WB:',\n 
10090: '', # No MGI prefix\n 10116: '', # No RGD prefix\n 559292: 'SGD:',\n 4932: 'SGD:',\n 7227: 'FB:',\n 9606: '', # No HGNC prefix\n 2697049: '' # No SARS-CoV-2 prefix\n }\n\n new_identifier = species_dict[taxon_id] + identifier\n\n return new_identifier", "def UriStrFor(iterated_uri, obj):\n return '%s://%s/%s' % (iterated_uri.scheme, obj.bucket.name, obj.name)", "def generate_name(prefix):\n suffix = generate_uuid()[:8]\n return '{0}_{1}'.format(prefix, suffix)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n cidr: Optional[pulumi.Input[str]] = None,\n commissioning_enabled: Optional[pulumi.Input[bool]] = None,\n internet_advertising_disabled: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_custom_ip_prefix_id: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n roa_validity_end_date: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n wan_validation_signed_message: Optional[pulumi.Input[str]] = None,\n zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Prefix':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _PrefixState.__new__(_PrefixState)\n\n __props__.__dict__[\"cidr\"] = cidr\n __props__.__dict__[\"commissioning_enabled\"] = commissioning_enabled\n __props__.__dict__[\"internet_advertising_disabled\"] = internet_advertising_disabled\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"parent_custom_ip_prefix_id\"] = parent_custom_ip_prefix_id\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"roa_validity_end_date\"] = roa_validity_end_date\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"wan_validation_signed_message\"] = wan_validation_signed_message\n __props__.__dict__[\"zones\"] = zones\n return Prefix(resource_name, opts=opts, __props__=__props__)", "def generate_network_uri(project_id, network):\n return 'projects/{}/global/networks/{}'.format(project_id, network)", "def _make_url(self, url_part, blueprint_prefix):\n parts = (blueprint_prefix, self.prefix, url_part)\n return ''.join(_ for _ in parts if _)", "def __extend_uri(prefixes, short):\n for prefix in prefixes:\n if short.startswith(prefix):\n return short.replace(prefix + ':', prefixes[prefix])\n return short", "def _id_for_index(prefix, index):\r\n return \"%s%d\" % (prefix, index + 1)", "def generate_arn(self):\n if self._generate_arn is None:\n self._generate_arn = functools.partial(\n generate_arn,\n self.resource_type.arn_service or self.resource_type.service,\n region=not self.resource_type.global_resource and self.config.region or \"\",\n account_id=self.account_id,\n resource_type=self.resource_type.arn_type,\n separator=self.resource_type.arn_separator)\n return self._generate_arn", "def _uri(helper):\n return '/'.join((\n helper.context_meta['server_uri'],\n 'servicesNS',\n 'nobody',\n 'Splunk_TA_paloalto',\n 'storage',\n 'collections',\n 'data',\n 'minemeldfeeds'))", "def resource_prefix(self):", "def make_doi(doi_prefix: int):\n\n return f\"10.{doi_prefix}/{str(uuid.uuid4())}\"", "def build_prefix(cls, endpoint, method, size):\n return f\"DHT{cls.MAJOR}{cls.MINOR}{size:06x}{endpoint}{method}\".encode(\"utf8\")", "def _format_id(ns, id):\n label = '%s:%s' % (ns, id)\n label = 
label.replace(' ', '_')\n url = get_identifiers_url(ns, id)\n return (label, url)" ]
[ "0.66818917", "0.6075172", "0.59251356", "0.5835961", "0.5789605", "0.57216346", "0.5645432", "0.56307805", "0.5608411", "0.54919595", "0.54798126", "0.54721117", "0.5455161", "0.5435575", "0.5394286", "0.536875", "0.53580153", "0.53372353", "0.53315735", "0.5327415", "0.5322124", "0.52807623", "0.5280052", "0.52528507", "0.52393365", "0.52391773", "0.5230094", "0.51915765", "0.518256", "0.51760584" ]
0.71465874
0
set_time_period takes in a data frame and calculates the time period (ThanksGiving, WinterBreak, SummerBreak or Not Holiday) for each record and returns a modified data frame
def set_time_period(df): for i in range(0,len(df['Day'])): if (((df.loc[i,'Month']==11) & (df.loc[i,'Day']<=30) & (df.loc[i,'Day']>=27))\ | ((df.loc[i,'Month']==12) & (df.loc[i,'Day']<=3) & (df.loc[i,'Day']>=1)) ): df.loc[i,'Holiday'] = 'ThanksGiving' TGDate = df.loc[i,'week_ending']+timedelta(days=-7) df.loc[df['week_ending'] == TGDate,'Holiday'] = 'ThanksGiving' elif ((df.loc[i,'Month']==1) & (df.loc[i,'Day']<=11) & (df.loc[i,'Day']>=5)): df.loc[i,'Holiday'] = 'WinterBreak' WBDate = df.loc[i,'week_ending']+timedelta(days=-7) df.loc[df['week_ending'] == WBDate, 'Holiday'] = 'WinterBreak' elif ((df.loc[i,'Month']==7) | (df.loc[i,'Month']==8)): df.loc[i, 'Holiday'] = 'SummerBreak' else: df.loc[i, 'Holiday'] = 'Not Holiday' return(df)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_datetime_train(df):\n\n df['pickup_hour'] = pd.to_datetime(df['pickup_datetime']).dt.hour\n\n df['dropoff_hour'] = pd.to_datetime(df['dropoff_datetime']).dt.hour\n\n df['pickup_minute'] = pd.to_datetime(df['pickup_datetime']).dt.minute\n\n df['dropoff_minute'] = pd.to_datetime(df['dropoff_datetime']).dt.minute\n\n df['pickup_hour_sin'], df['pickup_hour_cos'] = convert_time_sin_cos(df, 'pickup_hour')\n\n df['dropoff_hour_sin'], df['dropoff_hour_cos'] = convert_time_sin_cos(df, 'dropoff_hour')\n\n #split datetime between dates and time\n #using normalize even though it gives us 0:00 time, but the resulting column is a datetime object,\n #which allows us to further process for day of week\n df['pickup_date'] = pd.to_datetime(df['pickup_datetime']).dt.date\n\n df['dropoff_date'] = pd.to_datetime(df['dropoff_datetime']).dt.date\n\n #create day of the week for both pickup date and dropoff dates\n df['pickup_day'] = pd.to_datetime(df['pickup_datetime']).dt.weekday\n\n df['dropoff_day'] = pd.to_datetime(df['dropoff_datetime']).dt.weekday\n\n #get week of year to capture effects of holidays\n df['pickup_weekofyear'] = pd.to_datetime(df['pickup_datetime']).dt.weekofyear\n\n df[\"month\"] = pd.to_datetime(df['pickup_datetime']).dt.month\n\n df[\"year\"] = pd.to_datetime(df['pickup_datetime']).dt.year\n #one hot encode day of the week for both pickup and dropoff\n df = pd.get_dummies(df, columns=['pickup_day', 'dropoff_day'])\n\n return df", "def add_time_period(df):\n\n # determine in which half hour period of the day the \n # predicted time of arrival falls\n\n interval = df.iloc[0].planned_arrival // 1800 \n\n # find string representation of period from dict. mapping (top)\n\n inverval_string = interval_map[interval]\n\n # add the feature\n\n df['TIME_PERIOD_ARRIVAL'] = inverval_string\n\n # set the dtype\n\n df.TIME_PERIOD_ARRIVAL = df.TIME_PERIOD_ARRIVAL.astype('category') \n\n return df", "def set_reference_period(game_data, settings):\n\n since_date = None\n until_date = None\n if settings.today:\n today_date = datetime.date.today()\n since_date = today_date.strftime(DATETIME_FORMAT)\n until_date = (today_date + datetime.timedelta(1)).strftime(\n DATETIME_FORMAT)\n else:\n if settings.since:\n since_date = dateutil.parser.parse(settings.since).strftime(\n DATETIME_FORMAT)\n\n if settings.until:\n until_date = dateutil.parser.parse(settings.until).strftime(\n DATETIME_FORMAT)\n\n game_data.update(\n since=since_date,\n until=until_date,)", "def modify_datetime_test(df):\n\n df['pickup_hour'] = pd.to_datetime(df['pickup_datetime']).dt.hour\n df['pickup_minute'] = pd.to_datetime(df['pickup_datetime']).dt.minute\n df['pickup_hour_sin'], df['pickup_hour_cos'] = convert_time_sin_cos(df, 'pickup_hour')\n df['pickup_date'] = pd.to_datetime(df['pickup_datetime']).dt.date\n df['pickup_day'] = pd.to_datetime(df['pickup_datetime']).dt.weekday\n df['pickup_day'] = pd.to_datetime(df['pickup_datetime']).dt.weekday\n df['pickup_weekofyear'] = pd.to_datetime(df['pickup_datetime']).dt.weekofyear\n df[\"month\"] = pd.to_datetime(df['pickup_datetime']).dt.month\n df[\"year\"] = pd.to_datetime(df['pickup_datetime']).dt.year\n return df", "def fix_time_fields(self):\n time_fields = {\"Time of day\": lambda time: time.hour, \"Time of year (month)\": lambda time: time.month}\n for time_field in time_fields.keys():\n for i in range(self.df.shape[0]):\n value = self.df[time_field][i]\n if type(value) is datetime.time or type(value) is datetime.datetime:\n self.df[time_field].loc[i] = 
time_fields[time_field](value)", "def filter_for_trade_time(df: pd.DataFrame, group_var: str = \"symbol\", time_var: str = \"timestamp\") -> pd.DataFrame:\n # if we only observe cash in the balances, that means the game has only just kicked off or they haven't ordered.\n if set(df[group_var].unique()) == {'Cash'}:\n min_time = df[time_var].min()\n max_time = df[time_var].max()\n trade_days_df = get_trading_calendar(min_time, max_time)\n if trade_days_df.empty:\n return df\n\n # this bit of logic checks whether any trading hours have happened, if if the user hasn't ordered\n trade_days_df = trade_days_df[\n (trade_days_df[\"market_close\"] >= min_time) & (trade_days_df[\"market_open\"] <= max_time)]\n if trade_days_df.empty:\n return df\n\n days = df[time_var].dt.normalize().unique()\n schedule_df = get_trading_calendar(min(days).date(), max(days).date())\n schedule_df['start'] = schedule_df['market_open'].apply(datetime_to_posix)\n schedule_df['end'] = schedule_df['market_close'].apply(datetime_to_posix)\n df['timestamp_utc'] = df['timestamp'].dt.tz_convert(\"UTC\")\n df['timestamp_epoch'] = df['timestamp_utc'].astype('int64') // 1e9\n df[\"mask\"] = False\n for start, end in zip(schedule_df['start'], schedule_df['end']):\n df[\"mask\"] = df[\"mask\"] | mask_time_creator(df, start, end)\n df = df[df[\"mask\"]]\n return df.drop([\"timestamp_utc\", \"timestamp_epoch\", \"mask\"], axis=1)", "def periodCheck(data):", "def set_well_boundary(dat, excel_file, sheet_name, well_name,\n dates, parameters=['Flow (t/h)', 'WHP (barg)'],\n t_step='day', temp=75., decimate=False,\n debug=0):\n # Read in excel file\n df = pd.read_excel(excel_file, header=[0, 1], sheetname=sheet_name)\n # All flow info is local time\n df.index = df.index.tz_localize('Pacific/Auckland')\n print('Flow data tz set to: {}'.format(df.index.tzinfo))\n # Truncate to desired dates\n start = dates[0]\n end = dates[1]\n df = df.truncate(before=start, after=end)\n dtos = df.xs((well_name, parameters[0]), level=(0, 1),\n axis=1).index.to_pydatetime()\n flows = df.xs((well_name, parameters[0]), level=(0, 1), axis=1)\n # Convert t/h to kg/sec (injection is negative)\n flows /= -3.6\n flow_list = flows.values.tolist()\n # Flatten this for some dumb (self-imposed) reason\n flow_list = [lst[0] for lst in flow_list]\n pres = df.xs((well_name, parameters[1]), level=(0, 1), axis=1)\n # Convert bar to MPa\n pres /= 10\n pres_list = pres.values.tolist()\n pres_list = [lst[0] for lst in pres_list]\n # Convert dtos to elapsed time\n if t_step == 'min':\n times = [(dt - dtos[0]).total_seconds() / 60. for dt in dtos]\n elif t_step == 'day':\n times = [(dt - dtos[0]).total_seconds() / 86400. 
for dt in dtos]\n well_zones = [key for key in dat.zone.keys() if type(key) == str]\n zone_list = [zone for zone in well_zones if zone.startswith(well_name)]\n if decimate:\n times = times[::decimate]\n flow_list = flow_list[::decimate]\n pres_list = pres_list[::decimate]\n if debug > 0:\n plt.plot(times, flow_list)\n plt.plot(times, pres_list)\n plt.show()\n temps = [temp for i in range(len(flow_list))]\n # Create boundary\n flow_list.insert(0, 'dsw')\n pres_list.insert(0, 'pw')\n temps.insert(0, 'ft')\n bound = fdata.fboun(type='ti_linear', zone=zone_list, times=times,\n variable=[flow_list, pres_list, temps])\n # Add it\n dat.add(bound)\n return dat", "def test_find_df_period(self):\n test_search_df = pd.read_csv(DF_PATH)\n result_1 = find_df_period(test_search_df, 'pickup_datetime', 6)\n p_time_periods_1 = result_1['time_period'].tolist()\n p_intervals_1 = [2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4]\n\n result_2 = find_df_period(test_search_df, 'pickup_datetime', 4)\n p_time_periods_2 = result_2['time_period'].tolist()\n p_intervals_2 = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2]\n\n self.assertTrue(p_time_periods_1 == p_intervals_1)\n self.assertTrue(p_time_periods_2 == p_intervals_2)", "def _set_date_times(self):\n if self._report_key in (ReportTypes.SEARCH_DETAIL_REPORT, ReportTypes.SEARCH_BODY_REPORT):\n self._report_data['searchDateTime'] = Report._to_report_datetime(self._report_data['searchDateTime'])\n if self._report_data['totalResultsSize'] > 0:\n for detail in self._report_data['details']:\n detail['createDateTime'] = Report._to_report_datetime(detail['createDateTime'])\n if detail.get('declaredDateTime'):\n detail['declaredDateTime'] = Report._to_report_datetime(detail['declaredDateTime'], False)\n declared_value = str(detail['declaredValue'])\n if declared_value.isnumeric() and declared_value != '0':\n detail['declaredValue'] = '$' + '{:0,.2f}'.format(float(declared_value))\n else:\n detail['declaredValue'] = ''\n if detail.get('description') and detail['description'].get('engineerDate'):\n if detail['description']['engineerDate'] == '0001-01-01':\n detail['description']['engineerDate'] = ''\n else:\n detail['description']['engineerDate'] = \\\n Report._to_report_datetime(detail['description']['engineerDate'], False)\n else:\n detail['description']['engineerDate'] = ''\n if detail.get('location') and detail['location'].get('taxExpiryDate'):\n detail['location']['taxExpiryDate'] = \\\n Report._to_report_datetime(detail['location']['taxExpiryDate'], False)\n elif self._report_key == ReportTypes.MHR_REGISTRATION:\n reg = self._report_data\n reg['createDateTime'] = Report._to_report_datetime(reg['createDateTime'])\n if reg.get('description') and reg['description'].get('engineerDate'):\n if reg['description']['engineerDate'] == '0001-01-01':\n reg['description']['engineerDate'] = ''\n else:\n reg['description']['engineerDate'] = \\\n Report._to_report_datetime(reg['description']['engineerDate'], False)\n else:\n reg['description']['engineerDate'] = ''\n if reg.get('location') and reg['location'].get('taxExpiryDate'):\n reg['location']['taxExpiryDate'] = Report._to_report_datetime(reg['location']['taxExpiryDate'], False)\n elif self._report_key in (ReportTypes.MHR_TRANSFER, ReportTypes.MHR_EXEMPTION,\n ReportTypes.MHR_TRANSPORT_PERMIT, ReportTypes.MHR_NOTE,\n ReportTypes.MHR_ADMIN_REGISTRATION):\n reg = self._report_data\n reg['createDateTime'] = Report._to_report_datetime(reg['createDateTime'])\n if reg.get('declaredValue'):\n declared_value = str(reg['declaredValue'])\n if 
declared_value.isnumeric() and declared_value != '0':\n reg['declaredValue'] = '$' + '{:0,.2f}'.format(float(declared_value))\n else:\n reg['declaredValue'] = ''\n if reg.get('transferDate'):\n reg['transferDate'] = Report._to_report_datetime(reg['transferDate'], False)\n if self._report_key == ReportTypes.MHR_TRANSPORT_PERMIT and reg.get('newLocation'):\n reg['location'] = reg.get('newLocation')\n if reg.get('location') and reg['location'].get('taxExpiryDate'):\n reg['location']['taxExpiryDate'] = Report._to_report_datetime(reg['location']['taxExpiryDate'],\n False)", "def single_curtailment_or_shift_each_day_between_12_and_14_pm(\n start: datetime, end: datetime, resolution: timedelta\n) -> DataFrame:\n imbalance_start_time = \"12:00\"\n imbalance_end_time = \"14:00\"\n imbalance_value = -2 # MW\n imbalance_price_between_2_and_3_am = 10 # EUR/MWh\n imbalance_price_otherwise = 5 # EUR/MWh\n df = initialize_df(\n columns=[\"Imbalance (in MW)\", \"Price (in EUR/MWh)\"],\n start=start,\n end=end,\n resolution=resolution,\n )\n df[\"Imbalance (in MW)\"] = 0\n df[\"Imbalance (in MW)\"].iloc[\n df.index.indexer_between_time(\n start_time=imbalance_start_time,\n end_time=imbalance_end_time,\n include_end=False,\n )\n ] = imbalance_value\n df[\"Price (in EUR/MWh)\"] = imbalance_price_otherwise\n df[\"Price (in EUR/MWh)\"].iloc[\n df.index.indexer_between_time(\n start_time=imbalance_start_time,\n end_time=imbalance_end_time,\n include_end=False,\n )\n ] = imbalance_price_between_2_and_3_am\n return df", "def augment(steps, add_month=True, add_year=True,\n add_weekend=True, add_holiday=True):\n\n temp = aggregate_steps(steps)\n temp.date = pd.to_datetime(temp.date, format='%Y-%m-%d')\n\n # Add holidays and weekends\n is_weekend = []\n is_holiday = []\n nl_holidays = holidays.Netherlands()\n for i in temp.date:\n if i.weekday() in [5, 6]:\n is_weekend.append(1)\n else:\n is_weekend.append(0)\n\n if i in nl_holidays:\n is_holiday.append(1)\n else:\n is_holiday.append(0)\n\n if add_weekend:\n temp['weekend'] = is_weekend\n if add_holiday:\n temp['holiday'] = is_holiday\n\n # Convert time to sin and cos to keep track of periodicity\n day = 24*60*60\n month = 30.416*day\n year = 365.2425*day\n\n timestamp_s = temp.date.map(datetime.datetime.timestamp)\n if add_month:\n temp['Month sin'] = np.sin(timestamp_s * (2 * np.pi / month))\n temp['Month cos'] = np.cos(timestamp_s * (2 * np.pi / month))\n if add_year:\n temp['Year sin'] = np.sin(timestamp_s * (2 * np.pi / year))\n temp['Year cos'] = np.cos(timestamp_s * (2 * np.pi / year))\n\n # Remove the date and day of the week columns\n temp = temp.drop('date', axis=1)\n temp = temp.drop('dow', axis=1)\n return temp", "def process_data(self, df_data, breakdown='weekly'):\n df_data['time'] = df_data['time'].apply(self.remove_time_zone)\n df_data['grid_coord'] = df_data['grid_coord'].astype(str)\n # return self.process_chunk((self.remove_time_zone('2019-04-15T00:00:00-04:00'), self.remove_time_zone('2019-04-16T00:00:00-04:00')), df_data)\n # get weekly/daily time chunks within cleanedInputData\n week_days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']\n start = min(df_data['time']) #str\n end = max(df_data['time']) #str\n start_date = iso8601.parse_date(start).replace(hour=0, minute=0, second=0)\n end_date = (iso8601.parse_date(end) + timedelta(days=1)).replace(hour=0, minute=0, second=0)\n if breakdown == \"weekly\":\n dates = pd.date_range(start_date, end_date, freq='W-'+week_days[start_date.weekday()])\n dates = [e.isoformat() for e in dates] + 
[end_date.isoformat()]\n else: # breakdown == \"daily\"\n dates = pd.date_range(start_date, end_date, freq='d')\n dates = [e.isoformat() for e in dates]\n time_chunks = []\n for left, right in zip(dates, dates[1:]):\n time_chunks.append((left, right))\n # return self.process_chunk(time_chunks[0], df_data)\n # parallelize processing between time chunks\n with Pool(cpu_count()) as p:\n ret_list = p.map(partial(self.process_chunk, df_data=df_data), time_chunks)\n return pd.concat(ret_list)", "def goals_difference_to_time_period(self, team_id, time_period_type='M', time_period_num=1):\n # {{{\n if time_period_type not in ['W', 'M', 'Y', 'S', 'L']:\n time_period_type = 'M'\n if type(time_period_num) is not int or time_period_num == 0:\n time_period_num = 1\n\n if time_period_type in ['W', 'M', 'Y', 'S']:\n goals_scored = np.nan\n goals_conceded = np.nan\n if time_period_type in ['W', 'M', 'Y']:\n matches_containing_team = self.matches[(self.matches[\"HID\"] == team_id) |\n (self.matches[\"AID\"] == team_id)].sort_index()\n if time_period_type == 'W':\n time_period_num *= 7 # week fixed to 7 days\n elif time_period_type == 'M':\n time_period_num *= 30 # month fixed to 30 days\n elif time_period_type == 'Y':\n time_period_num *= 365 # year fixed to 365 days\n\n how_deep_to_past = np.datetime64(self.today) - np.timedelta64(time_period_num, 'D')\n matches_containing_team = matches_containing_team[(matches_containing_team['Date'] >= str(how_deep_to_past))\n & (matches_containing_team['Date'] < self.yesterday)]\n if not matches_containing_team.empty:\n goals_conceded = matches_containing_team[matches_containing_team[\"HID\"] == team_id]['ASC'].sum() + \\\n matches_containing_team[matches_containing_team[\"AID\"] == team_id]['HSC'].sum()\n goals_scored = matches_containing_team[matches_containing_team[\"HID\"] == team_id]['HSC'].sum() + \\\n matches_containing_team[matches_containing_team[\"AID\"] == team_id]['ASC'].sum()\n\n elif time_period_type == 'S':\n # It is assumed that team is already added in DataFrame self.LL_data\n matches_containing_team = self.SL_data.xs(team_id, level='second')[-1-time_period_num:-1]\n if not matches_containing_team.empty:\n goals_conceded = matches_containing_team['SL_Goals_Conceded'].sum()\n goals_scored = matches_containing_team['SL_Goals_Scored'].sum()\n\n return goals_scored - goals_conceded\n elif time_period_type == 'L':\n # It is assumed that team is already added in DataFrame self.LL_data\n return self.LL_data.loc[team_id, 'LL_Goals_Scored'] - self.LL_data.loc[team_id, 'LL_Goals_Conceded']", "def postpone(self, dlt_time, ky_word):\n if ky_word == 'hour':\n self.work_datetime = self.work_datetime + tdelta(seconds=dlt_time * 3600)\n elif ky_word == 'day':\n self.work_datetime = self.work_datetime + tdelta(days=dlt_time)\n elif ky_word == 'week':\n self.work_datetime = self.work_datetime + tdelta(weeks=dlt_time)\n elif ky_word == 'month':\n self.work_datetime = self.work_datetime + tdelta(days=dlt_time * 30)\n self.eisenhower_priority()\n return self.work_datetime", "def get_extension_list(self, timespan, db_lookup):\n\n if (self.sle_dict != None):\n return [self.sle_dict]\n \n t1 = time.time()\n\n # Look for plot period, if not defined then set a default\n try:\n _period_list = option_as_list(self.windrose_dict['period'])\n except (KeyError, TypeError):\n _period_list = ['day'] # 24 hours\n if _period_list is None:\n return Non\n elif hasattr(_period_list, '__iter__') and len(_period_list) > 0:\n sle_dict ={}\n for _period_raw in _period_list:\n _period = 
_period_raw.strip().lower()\n if _period == 'day':\n # normally this will be 86400 sec but it could be a daylight\n # savings changeover day\n # first get our stop time as a dt object so we can do some\n # dt maths\n _stop_dt = datetime.datetime.fromtimestamp(timespan.stop)\n # then go back 1 day to get our start\n _start_dt = _stop_dt - datetime.timedelta(days=1)\n period = time.mktime(_stop_dt.timetuple()) - time.mktime(_start_dt.timetuple())\n elif _period == 'week':\n # normally this will be 604800 sec but it could be a daylight\n # savings changeover week\n # first get our stop time as a dt object so we can do some\n # dt maths\n _stop_dt = datetime.datetime.fromtimestamp(timespan.stop)\n # then go back 7 days to get our start\n _start_dt = _stop_dt - datetime.timedelta(days=7)\n period = time.mktime(_stop_dt.timetuple()) - time.mktime(_start_dt.timetuple())\n elif _period == 'month':\n # Our start time is midnight one month ago\n # Get a time object for midnight\n _mn_time = datetime.time(0)\n # Get a datetime object for our end datetime\n _day_date = datetime.datetime.fromtimestamp(timespan.stop)\n # Calculate our start timestamp by combining date 1 month\n # ago and midnight time\n _start_ts = int(time.mktime(datetime.datetime.combine(get_ago(_day_date,0,-1),_mn_time).timetuple()))\n # So our period is\n period = timespan.stop - _start_ts\n elif _period == 'year':\n # Our start time is midnight one year ago\n # Get a time object for midnight\n _mn_time = datetime.time(0)\n # Get a datetime object for our end datetime\n _day_date = datetime.datetime.fromtimestamp(timespan.stop)\n # Calculate our start timestamp by combining date 1 year\n # ago and midnight time\n _start_ts = int(time.mktime(datetime.datetime.combine(get_ago(_day_date, -1, 0),_mn_time).timetuple()))\n period = timespan.stop - _start_ts\n elif _period == 'alltime' or _period == 'all':\n _start_ts = startOfDay(db_lookup().firstGoodStamp())\n period = timespan.stop - _start_ts\n else:\n try:\n period = int(_period)\n except:\n # default to 1 day but it could be a daylight savings\n # changeover day\n # first get our stop time as a dt object so we can do some\n # dt maths\n _stop_dt = datetime.datetime.fromtimestamp(timespan.stop)\n # then go back 1 day to get our start\n _start_dt = _stop_dt - datetime.timedelta(days=1)\n period = time.mktime(_stop_dt.timetuple()) - time.mktime(_start_dt.timetuple())\n # Set any aggregation types/intervals if we have a period > 1 week\n if period >= 2678400: # nominal month\n if self.agg_type == None:\n self.agg_type = 'avg'\n if self.agg_interval == None:\n self.agg_interval = 86400\n elif period >= 604800: # nominal week:\n if self.agg_interval == None:\n self.agg_interval = 3600\n else:\n self.agg_interval = 60\n # Can now get our windrose data\n _suffix = str(period) if _period not in ['day', 'week', 'month', 'year', 'all', 'alltime'] else str(_period)\n sle_dict['wr' + _suffix] = self.calcWindRose(timespan, db_lookup, period)\n\n self.sle_dict = sle_dict\n t2 = time.time()\n logdbg2(\"w34highchartsWindRose SLE executed in %0.3f seconds\" % (t2 - t1))\n\n # Return our json data\n return [sle_dict]", "def test_get_time_period(database):\n sess = database.session\n quart_sub = SubmissionFactory(submission_id=1, reporting_fiscal_year=2020, reporting_fiscal_period=6,\n is_fabs=False, is_quarter_format=True)\n month_sub = SubmissionFactory(submission_id=2, reporting_start_date=datetime.datetime(2020, 9, 10),\n is_fabs=False, is_quarter_format=False)\n sess.add_all([quart_sub, 
month_sub])\n\n # Pass cases\n assert get_time_period(quart_sub) == 'FY 20 / Q2'\n assert get_time_period(month_sub) == '09 / 2020'", "def period_at(self, unit=\"day\", at_time=\"00:00:00\",\n week_day=\"Monday\", day=1):\n if self._start:\n raise OperationFailError(\"Task is already running.\")\n\n time_pattern = r'^([0-1]?\\d|[2][0-3]):[0-5]?\\d:[0-5]?\\d$'\n\n week_day_list = {\n \"Monday\": 0,\n \"Tuesday\": 1,\n \"Wednesday\": 2,\n \"Thursday\": 3,\n \"Friday\": 4,\n \"Saturday\": 5,\n \"Sunday\": 6\n }\n\n if unit == \"day\":\n self._periodic_unit = unit\n\n if not re.match(time_pattern, at_time):\n raise TimeFormatError\n\n tsp = at_time.split(\":\")\n self._at_time = [int(i) for i in tsp]\n\n elif unit == \"week\":\n self._periodic_unit = unit\n\n if not re.match(time_pattern, at_time):\n raise TimeFormatError\n tsp = at_time.split(\":\")\n self._at_time = [int(i) for i in tsp]\n\n if week_day not in week_day_list:\n raise TimeFormatError\n self._at_week_day = week_day_list[week_day]\n\n elif unit == \"month\":\n self._periodic_unit = unit\n\n if not re.match(time_pattern, at_time):\n raise TimeFormatError\n tsp = at_time.split(\":\")\n self._at_time = [int(i) for i in tsp]\n\n if day not in range(1, 32):\n raise TimeFormatError\n self._at_day = day\n\n else:\n raise TimeFormatError\n\n return self", "def preset_period(self, preset_period):\n\n self._preset_period = preset_period", "def checkpoint_period_set(self):\n raise Exception(\"TODO\")", "def set_time_by_timezone(df):\n df = set_city_time_by_timezone(df, 1078, 3)\n df = set_city_time_by_timezone(df, 22390, 4)\n df = set_city_time_by_timezone(df, 22430, 4)\n df = set_city_time_by_timezone(df, 22438, 5)\n return df", "def loop_adding_weeks_details(ad_account, previous_data, starting_date, ad_fields):\n start_date = starting_date\n if isinstance(previous_data, pd.DataFrame):\n while start_date + dt.timedelta(days = 6) < dt.date(2016, 9, 5): #dt.date.today()\n # Extract data from Facebook\n extracted_data = campaign_stats(ad_account, start_date, ad_fields)\n # Transform the extracted data\n transformed_data, index = clean_extracted_data_details(extracted_data, start_date)\n # Add another week\n start_date += dt.timedelta(days = 7)\n # Merge existing datat with new column\n previous_data = pd.merge(\n previous_data, \n transformed_data,\n how = 'outer',\n left_index = True,\n right_index = True\n )\n return sort_data(previous_data, index)\n else:\n # Extract data from Facebook\n extracted_data = campaign_stats(ad_account, start_date, ad_fields)\n # Transform the extracted data\n previous_data, index = clean_extracted_data_details(extracted_data, start_date)\n # Add another week\n start_date += dt.timedelta(days = 7)\n while start_date + dt.timedelta(days = 6) < dt.date(2016, 9, 5): #dt.date.today()\n # Extract data from Facebook\n extracted_data = campaign_stats(ad_account, start_date, ad_fields)\n # Transform the extracted data\n transformed_data, index = clean_extracted_data_details(extracted_data, start_date)\n # Add another week\n start_date += dt.timedelta(days = 7)\n # Merge existing datat with new column\n previous_data = pd.merge(\n previous_data, \n transformed_data,\n how = 'outer',\n left_index = True,\n right_index = True\n )\n return sort_data(previous_data, index)", "def _fill_day_dicts(self):\n today = datetime.date.today()\n for i, record in enumerate(self._dataset):\n if (record[\"createdAt\"] / 1000) > time.mktime((today - datetime.timedelta(days=30)).timetuple()):\n self._add_record(self._all30_dict, record, 
key=i)\n\n elif (record[\"createdAt\"] / 1000) > time.mktime((today - datetime.timedelta(days=60)).timetuple()):\n self._add_record(self._all60_dict, record, key=i)\n\n else:\n self._add_record(self._all90_dict, record, key=i)", "def assign_to_time_window(df_liquidity, sta, end, hours_s, hours_e, updated=False):\n\n if updated:\n price_name_tag = 'Price Updated'\n else:\n price_name_tag = 'Price'\n\n df_liquidity_sel = df_liquidity[(hours_s >= sta)\n & (hours_e <= end)\n ]\n\n # filter for ice-berg order indication\n df_liquidity_sel_ = df_liquidity_sel[df_liquidity_sel['Volume'] != 0]\n\n # check if df_liquidity_sel_ is not empty and return average in the rare case that now orders are submitted in a specific time frame\n if df_liquidity_sel_.shape[0] > 0:\n return np.average(df_liquidity_sel_[price_name_tag][df_liquidity_sel_['Side'] == 'S'], weights=df_liquidity_sel_['Volume'][df_liquidity_sel_['Side'] == 'S'],)\n -np.average(df_liquidity_sel_[price_name_tag][df_liquidity_sel_['Side'] == 'B'],\n weights=df_liquidity_sel_['Volume'][df_liquidity_sel_['Side'] == 'B'])\n\n else:\n return None", "def addTimeWashed(df): \n # Calculate time washed of food (start of no food)\n time_washed = pd.DataFrame(df.groupby(['date_yyyymmdd'])['wormsorter_start_time'].min())\n time_washed = time_washed.reset_index(drop=False)\n time_washed.columns = ['date_yyyymmdd','time_washed']\n \n df = pd.merge(left=df, right=time_washed, on='date_yyyymmdd')\n \n return df", "def set_time_series(data):\n t = pandas.Series(\n (data['TIME_StartTime'] -\n data['TIME_StartTime'].values[0]) / 1.0e6, name='t, sec')\n data = pandas.DataFrame(\n data.values,\n columns=data.columns, index=t)\n return data", "def prepare_data(self):\r\n annual_df = self.annual_df\r\n coef_df = self.coef_df\r\n quarter_df = self.quarter_df\r\n # historical_df = self.historical_df\r\n Event_Buffer = self.Event_Buffer\r\n\r\n Tot_Prod = coef_df[\"Product\"].nunique()\r\n # Tot_Week = coef_df[\"wk\"].nunique()\r\n Tot_Week = 52\r\n\r\n EDLP_Events = list(annual_df[\"RP_Events\"])\r\n Min_EDLP_Events = [\r\n i - Event_Buffer if i - Event_Buffer >= 0 else 0 for i in EDLP_Events\r\n ]\r\n Max_EDLP_Events = [\r\n i + Event_Buffer if i + Event_Buffer < Tot_Week + 1 else Tot_Week\r\n for i in EDLP_Events\r\n ]\r\n\r\n TPR_Events = list(annual_df[\"TPR_Events\"])\r\n Min_TPR_Events = [\r\n i - Event_Buffer if i - Event_Buffer >= 0 else 0 for i in TPR_Events\r\n ]\r\n Max_TPR_Events = [\r\n i + Event_Buffer if i + Event_Buffer < Tot_Week + 1 else Tot_Week\r\n for i in TPR_Events\r\n ]\r\n\r\n Target_EDLP_Spend = [i for i in annual_df[\"PPG_RP_Spend\"]]\r\n Target_TPR_Spend = [i for i in annual_df[\"PPG_TPR_Spend\"]]\r\n Target_Trade_Spend = [i for i in annual_df[\"PPG_Total_Spend\"]]\r\n\r\n Mapping = {}\r\n Prod_Ind = coef_df[\"Product\"][0:Tot_Prod]\r\n for i, j in zip(Prod_Ind.index, Prod_Ind.values):\r\n Mapping[j] = i\r\n Mapping_reverse = {i: j for j, i in Mapping.items()}\r\n\r\n constants = [i for i in coef_df[\"constant\"]]\r\n\r\n Cat_Coef = coef_df[\"Catalogue\"][0:Tot_Prod]\r\n\r\n Disp_Coef = coef_df[\"Display\"][0:Tot_Prod]\r\n\r\n Base_Price_stg1 = [i for i in quarter_df[\"Final_baseprice\"]]\r\n Intercepts_stg1 = []\r\n for pr in range(Tot_Prod):\r\n Intercepts_stg1.append(\r\n np.mean([constants[j * Tot_Prod + pr] for j in range(0, Tot_Week)])\r\n )\r\n\r\n Base_Price_stg2 = [[i] * Tot_Week for i in quarter_df[\"Final_baseprice\"]]\r\n Intercepts_stg2 = [\r\n constants[j : j + Tot_Prod] for j in range(0, len(constants), Tot_Prod)\r\n ] # 
noqa\r\n\r\n EDLP_Coef = np.array(\r\n coef_df[[i for i in coef_df.columns if i.count(\"Retailer_Regular\") == 1]]\r\n )\r\n TPR_Coef = np.array(\r\n coef_df[[i for i in coef_df.columns if i.count(\"Retailer_Promoted\") == 1]]\r\n )\r\n\r\n # ################################ Available EDLP Interactions pairs ##############################\r\n\r\n EDLP = [\r\n re.findall(r\"[0-9]+\", i)\r\n for i in coef_df.columns\r\n if i.count(\"Retailer_Regular\") > 1\r\n ]\r\n EDLP_Interactions = []\r\n for i in EDLP:\r\n temp = []\r\n for j in i:\r\n temp.append(int(j))\r\n EDLP_Interactions.append(temp)\r\n\r\n # ###################################### Available TPR Interactions pairs #########################\r\n\r\n TPR = [\r\n re.findall(r\"[0-9]+\", i)\r\n for i in coef_df.columns\r\n if i.count(\"Retailer_Promoted\") > 1\r\n ]\r\n TPR_Interactions = []\r\n for i in TPR:\r\n temp = []\r\n for j in i:\r\n temp.append(int(j))\r\n TPR_Interactions.append(temp)\r\n\r\n # ###################################### EDLP_Interaction_Coef_Values ############################\r\n\r\n EDLP_Int_Coef_Values = {}\r\n for col in coef_df.columns:\r\n if col.count(\"Retailer_Regular\") > 1:\r\n Pair_name = \"_\".join([str(int(i)) for i in re.findall(r\"[0-9]+\", col)])\r\n EDLP_Int_Coef_Values[Pair_name] = list(coef_df[col])\r\n\r\n # ###################################### TPR_Interaction_Coef_Values #############################\r\n\r\n TPR_Int_Coef_Values = {}\r\n for col in coef_df.columns:\r\n if col.count(\"Retailer_Promoted\") > 1:\r\n Pair_name = \"_\".join([str(int(i)) for i in re.findall(r\"[0-9]+\", col)])\r\n TPR_Int_Coef_Values[Pair_name] = list(coef_df[col])\r\n\r\n # ##################################### Loading Pantry Loading Coefficients #######################\r\n\r\n Pantry_1 = list(coef_df[\"Pantry_Loading_1\"])\r\n Pantry_1 = [\r\n Pantry_1[j : j + Tot_Prod] for j in range(0, len(Pantry_1), Tot_Prod)\r\n ]\r\n Pantry_2 = list(coef_df[\"Pantry_Loading_2\"])\r\n Pantry_2 = [\r\n Pantry_2[j : j + Tot_Prod] for j in range(0, len(Pantry_2), Tot_Prod)\r\n ]\r\n\r\n # TE_Coeff = np.array(Promo_df[[\"TE_Promo\",\"TE_NoPromo\"]])\r\n self.Tot_Prod = Tot_Prod\r\n self.Tot_Week = Tot_Week\r\n self.EDLP_Events = EDLP_Events\r\n self.Min_EDLP_Events = Min_EDLP_Events\r\n self.Max_EDLP_Events = Max_EDLP_Events\r\n self.TPR_Events = TPR_Events\r\n self.Min_TPR_Events = Min_TPR_Events\r\n self.Max_TPR_Events = Max_TPR_Events\r\n\r\n self.Target_EDLP_Spend = Target_EDLP_Spend\r\n self.Target_TPR_Spend = Target_TPR_Spend\r\n self.Target_Trade_Spend = Target_Trade_Spend\r\n self.Mapping = Mapping\r\n self.Mapping_reverse = Mapping_reverse\r\n self.constants = constants\r\n self.EDLP_Coef = EDLP_Coef\r\n self.TPR_Coef = TPR_Coef\r\n\r\n self.EDLP_Interactions = EDLP_Interactions\r\n self.TPR_Interactions = TPR_Interactions\r\n self.EDLP_Int_Coef_Values = EDLP_Int_Coef_Values\r\n self.TPR_Int_Coef_Values = TPR_Int_Coef_Values\r\n self.Pantry_1 = Pantry_1\r\n self.Pantry_2 = Pantry_2\r\n\r\n self.Base_Price_stg1 = Base_Price_stg1\r\n self.Intercepts_stg1 = Intercepts_stg1\r\n self.Base_Price_stg2 = Base_Price_stg2\r\n self.Intercepts_stg2 = Intercepts_stg2\r\n\r\n self.Cat_Coef = Cat_Coef\r\n self.Disp_Coef = Disp_Coef", "def extract_tt_by_periods(ttri, periods, start_time, end_time, filters):\n logger = getLogger(__name__)\n # sess = conn.get_session()\n das = {}\n all_wz_features = {}\n all_wz_laneconfigs = {}\n\n # collecting daily data\n for prd in periods:\n logger.debug('>>>> retrieving data for %s' % 
prd.get_date_string())\n year = prd.start_date.year\n sdate = prd.start_date\n edate = prd.end_date\n if year not in das:\n da_tt = tt.TravelTimeDataAccess(year)\n da_tt_wz = tt_workzone.TTWorkZoneDataAccess(year)\n da_tt_wz_feature = wz_feature.WZFeatureDataAccess()\n da_tt_wz_lncfg = wz_laneconfig.WZLaneConfigDataAccess()\n da_tt_weather = tt_weather.TTWeatherDataAccess(year)\n da_tt_snowmgmt = tt_snowmgmt.TTSnowManagementDataAccess(year)\n da_tt_incident = tt_incident.TTIncidentDataAccess(year)\n da_tt_specialevent = tt_specialevent.TTSpecialeventDataAccess(year)\n das[year] = (\n da_tt, da_tt_wz, da_tt_wz_feature, da_tt_wz_lncfg, da_tt_weather, da_tt_snowmgmt, da_tt_incident,\n da_tt_specialevent)\n\n (da_tt, da_tt_wz, da_tt_wz_feature, da_tt_wz_lncfg, da_tt_weather, da_tt_snowmgmt, da_tt_incident,\n da_tt_specialevent) = das[year]\n\n # traveltimes = da_tt.list_by_period(ttri.id, self.prd)\n weathers = da_tt_weather.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.WeatherInfo] \"\"\"\n workzones = da_tt_wz.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.WorkZoneInfo] \"\"\"\n incidents = da_tt_incident.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.IncidentInfo] \"\"\"\n snowmgmts = da_tt_snowmgmt.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.SnowManagementInfo] \"\"\"\n specialevents = da_tt_specialevent.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.SpecialEventInfo] \"\"\"\n traveltimes = da_tt.list_by_period(ttri.id, prd)\n \"\"\":type: list[pyticas_tetres.ttrms_types.TravelTimeInfo] \"\"\"\n\n if not any(weathers):\n logger.debug('>>>> end of retrieving data for %s (no weather data)' % prd.get_date_string())\n continue\n\n extras = {\n 'weathers': {_tt.id: [] for _tt in traveltimes},\n 'workzones': {_tt.id: [] for _tt in traveltimes},\n 'incidents': {_tt.id: [] for _tt in traveltimes},\n 'specialevents': {_tt.id: [] for _tt in traveltimes},\n 'snowmgmts': {_tt.id: [] for _tt in traveltimes},\n }\n \"\"\":type: dict[str, dict[int, list]]\"\"\"\n\n _put_to_bucket(ttri, weathers, extras['weathers'], da_tt_weather, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, workzones, extras['workzones'], da_tt_wz, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, incidents, extras['incidents'], da_tt_incident, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, snowmgmts, extras['snowmgmts'], da_tt_snowmgmt, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, specialevents, extras['specialevents'], da_tt_specialevent, year, all_wz_features, all_wz_laneconfigs, das)\n\n for tti in traveltimes:\n _tt_weathers = extras['weathers'][tti.id]\n extdata = ExtData(tti,\n _tt_weathers[0] if _tt_weathers else None,\n extras['incidents'][tti.id],\n extras['workzones'][tti.id],\n extras['specialevents'][tti.id],\n extras['snowmgmts'][tti.id])\n\n if start_time <= tti.str2datetime(tti.time).time() <= end_time:\n for ef in filters:\n try:\n ef.check(extdata)\n except Exception as ex:\n tb.traceback(ex)\n logger.debug('>>>> end of retrieving data for %s (error occured 1)' % prd.get_date_string())\n continue\n else:\n for ef in filters:\n try:\n ef.check_outofrange(extdata)\n except Exception as ex:\n tb.traceback(ex)\n logger.debug('>>>> end of retrieving data for %s (error occured 2)' % prd.get_date_string())\n 
continue\n\n del extras\n logger.debug('>>>> end of retrieving data for %s' % prd.get_date_string())\n\n # sess.close()", "def test_reportperiod_updatetimesheet_save_only_set(self):\n date = self.reporting_period.start_date.strftime('%Y-%m-%d')\n response = self.app.post(\n reverse(\n 'reportingperiod:UpdateTimesheet',\n kwargs={'reporting_period': date}\n ),\n {\n 'save_only': '1',\n 'timecardobject_set-TOTAL_FORMS': '1',\n 'timecardobject_set-INITIAL_FORMS': '0',\n 'timecardobject_set-MIN_NUM_FORMS': '0',\n 'timecardobject_set-MAX_NUM_FORMS': '1000',\n 'timecardobject_set-0-project': '4',\n 'timecardobject_set-0-hours_spent': '',\n },\n headers={'X_FORWARDED_EMAIL': self.regular_user.email},\n )\n formset = response.context['formset']\n self.assertTrue(formset.save_only)", "def single_curtailment_each_day_between_2_and_3_am(\n start: datetime, end: datetime, resolution: timedelta\n) -> DataFrame:\n opportunity_start_time = \"2:00\"\n opportunity_end_time = \"3:00\"\n imbalance_value = 100 # MW\n imbalance_price_between_2_and_3_am = 10 # EUR/MWh\n df = initialize_df(\n columns=[\"Imbalance (in MW)\", \"Price (in EUR/MWh)\"],\n start=start,\n end=end,\n resolution=resolution,\n )\n df[\"Imbalance (in MW)\"].iloc[\n df.index.indexer_between_time(\n start_time=opportunity_start_time,\n end_time=opportunity_end_time,\n include_end=False,\n )\n ] = imbalance_value\n df[\"Price (in EUR/MWh)\"].iloc[\n df.index.indexer_between_time(\n start_time=opportunity_start_time,\n end_time=opportunity_end_time,\n include_end=False,\n )\n ] = imbalance_price_between_2_and_3_am\n return df" ]
[ "0.6012442", "0.59708464", "0.54916745", "0.5437837", "0.52865595", "0.5175374", "0.5171756", "0.51660645", "0.51351386", "0.5048385", "0.504096", "0.5034693", "0.50177866", "0.50149727", "0.4976959", "0.49694532", "0.4965803", "0.4921499", "0.49133396", "0.48727775", "0.48724777", "0.4867688", "0.48499215", "0.48366544", "0.48363397", "0.4824985", "0.48249424", "0.48247388", "0.48180604", "0.48135704" ]
0.7606388
0
anova_analysis takes in a data frame and performs an anova test for hypothesis testing 1, and prints out the test results
def anova_analysis(df): time_periods = df.groupby(['week_ending','Holiday'],as_index = False)[['seats_sold']].sum() TG = time_periods.loc[time_periods['Holiday'] == 'ThanksGiving','seats_sold'] WB = time_periods.loc[time_periods['Holiday'] == 'WinterBreak','seats_sold'] SB = time_periods.loc[time_periods['Holiday'] == 'SummerBreak','seats_sold'] NH = time_periods.loc[time_periods['Holiday'] == 'Not Holiday','seats_sold'] f,p = stats.f_oneway(TG,WB,SB,NH) print('The f and p of ANOVA analysis are:') print(f,p) ## plot the mean of each group time_periods.boxplot('seats_sold', by='Holiday', figsize=(12, 8)) fileName = 'ANOVA.png' plt.savefig(fileName) print("The mean seats sold of each time periods:") print(time_periods.groupby('Holiday')['seats_sold'].mean()) pairwise = MultiComparison(time_periods['seats_sold'], time_periods['Holiday']) result = pairwise.tukeyhsd() print(pairwise) print(result) #print(pairwise.groupsunique)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testANOVA():\n\n data = {'Control': [54, 23, 45, 54, 45, 47], 'Treated': [87, 98, 64, 77, 89],\n 'TreatedAntagonist': [45, 39, 51, 49, 50, 55]}\n print(type(data))\n OneWayAnova(dataStruct=data, dataLabel='3 Groups', mode='parametric')\n print ('-'*80)\n print ('Compare to Prism output: ')\n print( \"\"\"\n \"Table Analyzed\"\t\"One-way ANOVA data\"\n\n \"ANOVA summary\"\n \" F\"\t22.57\n \" P value\"\t\"< 0.0001\"\n \" P value summary\"\t****\n \" Are differences among means statistically significant? (P < 0.05)\"\tYes\n \" R square\"\t0.7633\n\n \"Brown-Forsythe test\"\n \" F (DFn, DFd)\"\t\"0.7307 (2, 14)\"\n \" P value\"\t0.4991\n \" P value summary\"\tns\n \" Significantly different standard deviations? (P < 0.05)\"\tNo\n\n \"Bartlett's test\"\n \" Bartlett's statistic (corrected)\"\t2.986\n \" P value\"\t0.2247\n \" P value summary\"\tns\n \" Significantly different standard deviations? (P < 0.05)\"\tNo\n\n \"ANOVA table\"\tSS\tDF\tMS\t\"F (DFn, DFd)\"\t\"P value\"\n \" Treatment (between columns)\"\t4760\t2\t2380\t\"F (2, 14) = 22.57\"\t\"P < 0.0001\"\n \" Residual (within columns)\"\t1476\t14\t105.4\n \" Total\"\t6236\t16\n\n \"Data summary\"\n \" Number of treatments (columns)\"\t3\n \" Number of values (total)\"\t17\t\t\t\t\"\"\")\n print ('-'*80)\n print ('Multiple comparisions from Prism:')\n print (\"\"\"\n \"Number of families\"\t1\n \"Number of comparisons per family\"\t3\n Alpha\t0.05\n\n \"Tukey's multiple comparisons test\"\t\"Mean Diff.\"\t\"95% CI of diff.\"\tSignificant?\tSummary\n\n \" Control vs. Treated\"\t-38.33\t\"-54.61 to -22.06\"\tYes\t****\t\tA-B\n \" Control vs. Treated+Antagonist\"\t-3.500\t\"-19.02 to 12.02\"\tNo\tns\t\tA-C\n \" Treated vs. Treated+Antagonist\"\t34.83\t\"18.56 to 51.11\"\tYes\t***\t\tB-C\n\n\n \"Test details\"\t\"Mean 1\"\t\"Mean 2\"\t\"Mean Diff.\"\t\"SE of diff.\"\tn1\tn2\tq\tDF\n\n \" Control vs. Treated\"\t44.67\t83.00\t-38.33\t6.218\t6\t5\t8.719\t14\n \" Control vs. Treated+Antagonist\"\t44.67\t48.17\t-3.500\t5.928\t6\t6\t0.8349\t14\n \" Treated vs. Treated+Antagonist\"\t83.00\t48.17\t34.83\t6.218\t5\t6\t7.923\t14\n\n \"\"\")", "def one_way_anova(data_lastDV):\n col_names = data_lastDV.columns.values.tolist() # get the columns' names\n outcome = col_names.pop() # remove the last item in the list\n\n fig = plt.figure()\n i = 1\n\n for cond in col_names:\n cond_table = data_lastDV[[cond, outcome]].dropna()\n cond_lm = ols(outcome + \" ~ C(\" + cond + \")\", data=cond_table).fit()\n anova_table = anova_lm(cond_lm)\n\n print(\"\\n\"+FORMAT_LINE)\n print(\"One-Way ANOVA: \" + cond + \" --> \" + outcome)\n print(FORMAT_LINE)\n print(anova_table)\n #print(cond_lm.model.data.orig_exog)\n print(cond_lm.summary())\n\n ax = fig.add_subplot(1,2, i)\n ax = cond_table.boxplot(outcome, cond, ax=plt.gca())\n ax.set_xlabel(cond)\n ax.set_ylabel(outcome)\n i += 1\n # box plot\n user_input = input(\">> Display boxplot of conditions? 
[y/n]: \")\n if is_yes(user_input):\n fig.tight_layout()\n plt.show()", "def anova_interaction(data_lastDV):\n\n col_names = data_lastDV.columns.values # get the columns' names\n factor_groups = data_lastDV[col_names].dropna()\n if len(col_names) < 3:\n print(\"ERROR in statsMOOC.py: Not enough columns in dataframe to do interaction analysis: \" + len(col_names))\n\n # two-way anova\n formula = col_names[2] + \" ~ C(\" + col_names[0] + \") + C(\" + col_names[1] + \")\"\n formula_interaction = formula.replace('+', '*')\n interaction_lm = ols(formula, data=factor_groups).fit() # linear model\n print(interaction_lm.summary())\n\n print(FORMAT_LINE)\n print(\"- \" + col_names[2] + \" = \" + col_names[0] + \" * \" + col_names[1] + \" Interaction -\")\n print(anova_lm(ols(formula_interaction, data=factor_groups).fit(), interaction_lm))\n\n print(FORMAT_LINE)\n print(\"- \" + col_names[2] + \" = \" + col_names[0] + \" + \" + col_names[1] + \" ANOVA -\")\n print(anova_lm(ols(col_names[2] + \" ~ C(\" + col_names[0] + \")\", data=factor_groups).fit(), ols(col_names[2] +\" ~ C(\"+col_names[0]+\") + C(\" + col_names[1]+\", Sum)\", data=factor_groups).fit()))\n\n print(FORMAT_LINE)\n print(\"- \" + col_names[2] + \" = \" + col_names[1] + \" + \" + col_names[0] + \" ANOVA -\")\n print(anova_lm(ols(col_names[2] + \" ~ C(\" + col_names[1] + \")\", data=factor_groups).fit(), ols(col_names[2] +\" ~ C(\"+col_names[0]+\") + C(\" + col_names[1]+\", Sum)\", data=factor_groups).fit()))", "def anova_test_data(csv_data, types):\n f_statistics, p = 0, 0\n\n if len(types) == 2:\n f_statistics, p = stats.f_oneway(csv_data[types[0]], csv_data[types[1]])\n elif len(types) == 3:\n f_statistics, p = stats.f_oneway(csv_data[types[0]], csv_data[types[1]], csv_data[types[2]])\n elif len(types) == 4:\n f_statistics, p = stats.f_oneway(csv_data[types[0]], csv_data[types[1]], csv_data[types[2]], csv_data[types[3]])\n\n print('f_statistics={0}; p={1}'.format(f_statistics, p))", "def anova(df, dependent_var, independent_var):\n\n values = list()\n df.fillna('missing', inplace=True)\n for level in df[dependent_var].unique().tolist():\n values.append(df.loc[df[dependent_var] == level, independent_var]\n .values.tolist())\n f_value, p_value = stats.f_oneway(*values)\n\n return f_value, p_value", "def analyze_run():\n file_datas_dict = load_datas(Args.data_files)\n plotables_dict = dict()\n for file_name, datas in file_datas_dict.viewitems():\n analized_datas = analyze_datas(datas,Args.analysis_attributes)\n plotables = ana_results_to_plotables(\n analized_datas,\n Args.analysis_attributes\n )\n if Args.dm_file_out:\n analysis_save_dm(\n analized_datas,\n plotables,\n Args.analysis_attributes,\n Args.dm_file_out\n )\n if Args.mat_file_out:\n analysis_save(\n plotables,\n Args.analysis_attributes,\n Args.mat_file_out\n )\n if Args.verbose:\n plotables_dict[file_name] = plotables\n if Args.verbose:\n ana_plot_figures(plotables_dict,Args.analysis_attributes)", "def calc_anova(independent, dependent, alpha=0.05):\n combined = pd.concat([independent, dependent], axis=1)\n combined = combined.rename(columns={independent.name: \"indep\", dependent.name: \"dep\"})\n mod = ols('dep ~ C(indep)', data=combined).fit()\n aov_table = sm.stats.anova_lm(mod, typ=2)\n effect_size = aov_table[\"sum_sq\"][\"C(indep)\"] / aov_table[\"sum_sq\"][\"Residual\"]\n power = FTestAnovaPower().power(effect_size, len(combined), alpha, len(combined.groupby(by=\"indep\").groups))\n corr = []\n if aov_table['PR(>F)']['C(indep)'] < 0.05:\n pair_t = 
mod.t_test_pairwise('C(indep)')\n corr = pair_t.result_frame[pair_t.result_frame['reject-hs']].index.to_list()\n # print(corr)\n return aov_table['PR(>F)']['C(indep)'], corr, power, effect_size", "def general_analysis(df):\n pass", "def analyse_data(file_name, data_types, agent_types, types):\n metrics_data = pd.read_csv(file_name)\n\n for agent_type in agent_types:\n for data_type in data_types:\n csv_data = {}\n # Separating the data based on the winner type and extracting only what's important\n for element_type in types[agent_type]:\n csv_data[element_type] = list(metrics_data[data_type][metrics_data[agent_type] == element_type])\n\n visualise_data(csv_data, types[agent_type], data_type, agent_type)\n\n print(\"----------------------------------------------------------\")\n print(\"ANOVA test for '{1}' in terms of '{0}':\".format(agent_type, data_type))\n anova_test_data(csv_data, types[agent_type])\n print(\"----------------------------------------------------------\")", "def analyze(self, anomalies: pa.DataFrame) -> pa.DataFrame:\n raise NotImplementedError()", "def control_variation(df, outDir, features_to_analyse, \n variables_to_analyse=[\"date_yyyymmdd\"], \n remove_outliers=True, \n p_value_threshold=0.05, \n PCs_to_keep=10):\n \n # Record non-data columns before dropping feature columns \n other_colnames = [col for col in df.columns if col not in features_to_analyse]\n \n # Drop columns that contain only zeros\n colnames_before = list(df.columns)\n AllZeroFeats = df[features_to_analyse].columns[(df[features_to_analyse] == 0).all()]\n df = df.drop(columns=AllZeroFeats)\n colnames_after = list(df.columns)\n zero_cols = [col for col in colnames_before if col not in colnames_after]\n if len(zero_cols) > 0:\n print(\"Dropped %d features with all-zero summaries:\\n%s\" % (len(zero_cols), zero_cols))\n \n # Record feature column names after dropping zero data\n features_to_analyse = [feat for feat in df.columns if feat not in other_colnames]\n \n # Remove outliers from the dataset \n if remove_outliers:\n df, indsOutliers = removeOutliersMahalanobis(df, features_to_analyse)\n remove_outliers = False \n # NB: Ensure Mahalanobis operation to remove outliers is performed only once!\n\n # Check for normality in features to analyse in order decide which \n # statistical test to use: one-way ANOVA (parametric) or Kruskal-Wallis \n # (non-parametric) test\n TEST = check_normality(df, features_to_analyse, p_value_threshold)\n\n # Record name of statistical test used (kruskal/f_oneway)\n test_name = str(TEST).split(' ')[1].split('.')[-1].split('(')[0].split('\\'')[0]\n\n # CONTROL VARIATION: STATS (ANOVAs)\n # - Does N2 worm behaviour on control vary across experiment days? \n # (worms are larger? Shorter L1 diapuase? Camera focus/FOV adjusted? Skewed by non-worm tracked objects?\n # Did not record time when worms were refed! Could be this. 
If so, worms will be bigger across all foods on that day) \n # - Perform ANOVA to see if features vary across imaging days for control\n # - Perform Tukey HSD post-hoc analyses for pairwise differences between imaging days\n # - Highlight outlier imaging days and investigate reasons why\n # - Save list of top significant features for outlier days - are they size-related features?\n for grouping_variable in variables_to_analyse:\n print(\"\\nTESTING: %s\\n\" % grouping_variable)\n \n if not len(df[grouping_variable].unique()) > 1:\n print(\"Need at least two groups for stats to investigate %s\" % grouping_variable)\n else:\n print(\"Performing %s tests for '%s'\" % (test_name, grouping_variable)) \n \n test_results_df, sigfeats_out = \\\n topfeats_ANOVA_by_group(df, \n grouping_variable, \n features_to_analyse,\n TEST,\n p_value_threshold)\n \n # Ensure directory exists to save results\n Path(outDir).mkdir(exist_ok=True, parents=True)\n \n # Define outpaths\n froot = 'control_variation_in_' + grouping_variable + '_' + test_name\n stats_outpath = outDir / (froot + \"_results.csv\")\n sigfeats_outpath = outDir / (froot + \"_significant_features.csv\")\n \n # Save test statistics + significant features list to file\n test_results_df.to_csv(stats_outpath)\n sigfeats_out.to_csv(sigfeats_outpath, header=False)\n\n # Box plots\n plotDir = outDir / \"Plots\"\n topfeats_boxplots_by_group(df, \n test_results_df, \n grouping_variable,\n plot_save_dir=plotDir, #save to plotDir\n p_value_threshold=p_value_threshold)\n \n # PCA (coloured by grouping variable, eg. experiment date)\n df = doPCA(df, \n grouping_variable, \n features_to_analyse,\n plot_save_dir = plotDir,\n PCs_to_keep = PCs_to_keep)", "def do_analyse(args):\n\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n\n score = 'score'\n\n # Read in the results, and add a boolean target column.\n df = pd.read_csv(args.results, index_col=0)\n df['target'] = df['verify_speaker'] == df['enrol_speaker']\n\n # Calculate ideal 0.01% threshold over the multi-session data.\n nontarget_df = df.loc[df['target'] == False].sort_values(score, ascending=False)\n nontarget_count = nontarget_df[score].count()\n th_calc = nontarget_df.iloc[int(nontarget_count * (1 / 10000))][score]\n\n # Now filter the data so that we only consider mono-session enrolment and verification.\n df = df.loc[df['verify_room'] == df['enrol_room']]\n target_df = df.loc[df['target'] == True].sort_values(score, ascending=False)\n nontarget_df = df.loc[df['target'] == False].sort_values(score, ascending=False)\n target_count = target_df[score].count()\n nontarget_count = nontarget_df[score].count()\n\n # Calculate FA/FR for the user-defined threshold.\n th_user = args.th_user\n fr_user = target_df.loc[target_df[score] < th_user][score].count()\n fa_user = nontarget_df.loc[nontarget_df[score] > th_user][score].count()\n frr_user = fr_user / target_count\n far_user = fa_user / nontarget_count\n label_user = 'User Threshold: th {:.4f}, FR {} ({:.3f}%), FA {} ({:.3f}%)'.format(th_user, fr_user, frr_user * 100,\n fa_user, far_user * 100)\n\n # Calculate the FA/FR for the ideal threshold calculated from the multi-session data.\n fr_calc = target_df.loc[target_df[score] < th_calc][score].count()\n fa_calc = nontarget_df.loc[nontarget_df[score] > th_calc][score].count()\n frr_calc = fr_calc / target_count\n far_calc = fa_calc / nontarget_count\n label_calc = 'Calc Threshold: th {:.4f}, FR {} ({:.3f}%), FA {} ({:.3f}%)'.format(th_calc, fr_calc, frr_calc * 100,\n 
fa_calc, far_calc * 100)\n\n # Print the stats.\n print('\\nTarget Stats:')\n print(target_df[score].describe())\n print('\\nNon-Target Stats:')\n print(nontarget_df[score].describe())\n print('\\nThresholds:')\n print(label_user)\n print(label_calc)\n\n # Paint the graphs.\n paint_graph(score, 'verify_room', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'enrol_room', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'verify_speaker', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'enrol_speaker', df, th_user, label_user, th_calc, label_calc)", "def run_ANOVA_trajetories(groups):\n values = []\n labels = []\n for trajectory in groups:\n labels.append(trajectory)\n values.append(groups[trajectory])\n \n # This is copied from otu_category_significance.py\n try:\n F, prob = ANOVA_one_way(values)\n group_means = [i.mean() for i in values]\n except ValueError:\n #set the p-value to 'diff' if the variances are 0.0 (within rounding \n #error) and the means are not all the same. If the means are all\n #the same and the variances are 0.0, set the p-value to 1\n group_means = []\n group_variances = []\n for i in values:\n group_means.append(i.Mean)\n group_variances.append(i.Variance)\n ##### Added list to always return the same type of output\n group_means = list(set(group_means))\n if sum(group_variances) < 1e-21 and len(group_means) > 1:\n prob = 0.0\n else:\n prob = 1.0\n \n return labels, group_means, prob", "def mixed_anova_synergy_index_z(dataframe):\n if dataframe['condition'].nunique() <= 1:\n raise ValueError(\"ERROR: Between factor has insufficient number of levels.\")\n #ToDo: If there's only 1 condition, run ANOVA with one within factor instead.\n if dataframe['block'].nunique() <= 1:\n raise ValueError(\"ERROR: Between factor has insufficient number of levels.\")\n #ToDo: If there's only 1 block, run ANOVA with one between factor instead.\n aov = pg.mixed_anova(data=dataframe, dv='dVz', within='block', subject='user', between='condition', correction=True)\n return aov", "def test_ANOVA_one_way(self):\r\n g1 = array([10.0, 11.0, 10.0, 5.0, 6.0])\r\n g2 = array([1.0, 2.0, 3.0, 4.0, 1.0, 2.0])\r\n g3 = array([6.0, 7.0, 5.0, 6.0, 7.0])\r\n i = [g1, g2, g3]\r\n # dfn, dfd, F, between_MS, within_MS, group_means, prob = ANOVA_one_way(i)\r\n F, pval = ANOVA_one_way(i)\r\n # self.assertEqual(dfn, 2)\r\n # self.assertEqual(dfd, 13)\r\n self.assertFloatEqual(F, 18.565450643776831)\r\n # self.assertFloatEqual(between_MS, 55.458333333333343)\r\n # self.assertFloatEqual(within_MS, 2.9871794871794868)\r\n # self.assertFloatEqual(group_means, [8.4000000000000004, 2.1666666666666665, 6.2000000000000002])\r\n self.assertFloatEqual(pval, 0.00015486238993089464)", "def test_2():\n table = pandas.read_csv('data/matches.csv')\n query_result = show.show(table,\n dimensions=['player_of_match'],\n metric='win_by_runs' ,\n slices=[('season', Filters.EQUAL_TO, 2017)],\n \t date_range=('2017-05-09', '2017-05-12'),\n \t date_column_name='date', day_first=False,\n \t summary_operator=SummaryOperators.MEAN)\n print(query_result)\n expected_result = \"\"\" player_of_match MEAN of win_by_runs\n0 KK Nair 7\n1 MM Sharma 14\n2 SS Iyer 0\n3 WP Saha 7\"\"\"\n\n expected_suggestions = \"[]\"\n\n assert(expected_result == query_result[0].to_string())\n assert(expected_suggestions == str(query_result[1]))", "def test_adfuller(input_df, maxlag=1):\n tstats = []\n pvals = []\n for name, vals in input_df.iteritems():\n adf = tsa.stattools.adfuller(vals.dropna(), 
maxlag=maxlag)\n tstats.append(adf[0])\n pvals.append(adf[1])\n results = pd.DataFrame(np.vstack([tstats, pvals]).T, index=input_df.columns, columns=['tstat', 'pval'])\n return results", "def ANOVA_stats(subject_list, data_dir, h5_type, model_types):\n\t\n\tall_subjs = []\n\tall_models = []\n\tall_corrs = []\n\tcorrs = dict()\n\n\tfor model in model_types: # 3 total models we are comparing\n\t\tfor s in subject_list:\n\t\t\t# Load the STRF file for each individual model for the subject of interest\n\t\t\t# (phnfeat only, env only, or pitch only)\n\t\t\tstrf_file = '%s/%s/%s_STRF_by_%s_%s.hf5'%(data_dir, s, s, model, h5_type) # The STRF for this subject and this model type (env, phnfeat, or pitch)\n\t\t\twith h5py.File(strf_file,'r') as hf:\n\t\t\t\tcorrs[s] = hf['corrs_%s' %(h5_type.lower())][:] # Load the corrs\n\t\t\tfor ch in np.arange(64):\n\t\t\t\t# We have to do this so we have the subjects and models\n\t\t\t\t# columns that match the correlations vector\n\t\t\t\tall_subjs.append(s)\n\t\t\t\tall_models.append(model)\n\t\t\t\tall_corrs.append(corrs[s][ch])\n\tdata= {'corrs': np.array(all_corrs).ravel(), 'subject': all_subjs, 'STRF_type': all_models}\n\tdf = pd.DataFrame.from_dict(data)\n\tdf\n\t\n\t# Run a Friedman ANOVA (non-parametric equivalent of the repeated measures ANOVA)\n\t# with STRF performance as yhour dependent variable, STRF type (env, phnfeat, pitch) \n\t# as your within subjects measure, and subject as your subject. Look at p-unc for\n\t# the p value\n\tdata = df.groupby(['subject', 'STRF_type']).mean().reset_index()\n\t#print(data)\n\tpg.friedman(data=df, dv='corrs', within='STRF_type', subject='subject')\n\t\n\t# if p<0.05, run post-hoc sign-rank tests\n\n\t#extract just the corr values from the dataframe - will be used for post-hoc sign-rank tests\n\tpitch_x = data['corrs'][np.where(data['STRF_type']=='pitch')[0]]\n\tphnfeat_x = data['corrs'][np.where(data['STRF_type']=='phnfeat')[0]]\n\tenvs_x = data['corrs'][np.where(data['STRF_type']=='envs')[0]]\n\ttotalmodel_x = data['corrs'][np.where(data['STRF_type']=='pitchenvsphnfeat')[0]]\n\n\n\t#run wilcoxon signrank test - compare total model with individual features\n\tprint(pg.wilcoxon(totalmodel_x, phnfeat_x, tail='two-sided')) \n\tprint(pg.wilcoxon(totalmodel_x, envs_x, tail='two-sided')) \n\tprint(pg.wilcoxon(totalmodel_x, pitch_x, tail='two-sided'))\n\n\t#run wilcoxon signrank test - compare individual feature models with each other \n\tprint(pg.wilcoxon(phnfeat_x,pitch_x, tail='two-sided'))\n\tprint(pg.wilcoxon(envs_x, pitch_x, tail='two-sided'))\n\tprint(pg.wilcoxon(phnfeat_x, envs_x, tail='two-sided'))", "def adf_test(series,title=''):\n print(f'Augmented Dickey-Fuller Test: {title}')\n result = adfuller(series.dropna(),autolag='AIC') # .dropna() handles differenced data\n \n labels = ['ADF test statistic','p-value','# lags used','# observations']\n out = pd.Series(result[0:4],index=labels)\n\n for key,val in result[4].items():\n out[f'critical value ({key})']=val\n \n print(out.to_string()) # .to_string() removes the line \"dtype: float64\"\n \n if result[1] <= 0.05:\n print(\"Strong evidence against the null hypothesis\")\n print(\"Reject the null hypothesis\")\n print(\"Data has no unit root and is stationary\")\n else:\n print(\"Weak evidence against the null hypothesis\")\n print(\"Fail to reject the null hypothesis\")\n print(\"Data has a unit root and is non-stationary\")", "def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)", "def 
perform_analysis(self):\n analysis_type = self._analysis_type_variable.get()\n self.parent.set_analysis_type(analysis_type)\n\n # Validates the appropriateness of the data for regression\n if analysis_type == 'Regression':\n num_cols = []\n for col in list(self._df.columns.values):\n data_type = self._df.dtypes[col]\n if data_type == np.int64 or data_type == np.float64:\n num_cols.append(col)\n\n if len(num_cols) < 2:\n messagebox.showerror('Error', 'Data is not appropriate for simple linear regression')\n return\n\n self.parent.select_columns()", "def test_alternative(df, hypothesis, alternative='two-sided', alpha=0.05):\n df['H0'] = hypothesis[alternative + '_H0']\n df['H1'] = hypothesis[alternative + '_H1']\n formatted_alpha = round(alpha*100, 2)\n conclusion = 'There is no evidence' if df['p-val'][0] > alpha else 'There is evidence'\n df['Result'] = f'{conclusion} to reject the null hypothesis at {formatted_alpha}% significance'\n return df", "def test_9():\n table = pandas.read_csv('data/salary_in_various_regions.csv')\n query_result = show.show(table,\n metric='Salary(in $)' ,\n dimensions=['Resident City'] ,\n summary_operator=SummaryOperators.MEAN)\n print(query_result)\n expected_result = \"\"\" Resident City MEAN of Salary(in $)\n0 Chicago 1.658889e+05\n1 Palo Alto 3.033333e+04\n2 Washington 2.002740e+07\"\"\"\n\n expected_suggestions = \"[{'suggestion': 'Median is very different from the Mean', 'oversight': <Oversights.MEAN_VS_MEDIAN: 7>, 'is_row_level_suggestion': True, 'confidence_score': 3.1249999406334665, 'row_list': [{'row': 3, 'confidence_score': 3.1249999406334665}]}]\"\n\n assert(expected_result == query_result[0].to_string())\n assert(expected_suggestions == str(query_result[1]))", "def analysis_of_dataframe(self, dataframe):\n\t\ttypes = self.data.type.unique()\n\t\tratings = self.data.rating.unique()\n\n\t\tprint \"\"\n\n\t\t# First analysis section\n\t\tfor rating in ratings:\n\t\t\tpercentage = format(self.data.rating.value_counts()[rating] / len(self.data.index), '.6f')\n\n\t\t\t# Print probability data\n\t\t\tprint \"Prob(rating={}) = {}\".format(rating, percentage)\n\n\t\tprint \"\"\n\n\t\t# Second analysis section\n\t\tfor rating in ratings:\n\t\t\tfor type in types:\n\n\t\t\t\t# Get sub-set dataframe\n\t\t\t\ttemp_dataframe = self.data[self.data['rating'] == rating]\n\n\t\t\t\t# Get conditional probability\n\t\t\t\ttry:\n\t\t\t\t\tpercentage = format(temp_dataframe.type.value_counts()[type] / len(temp_dataframe.index), '.6f')\n\n\t\t\t\t# Current type not found in temp_dataframe\n\t\t\t\texcept KeyError:\n\t\t\t\t\tpercentage = format(0, '.6f')\n\n\t\t\t\t# Print probability data\n\t\t\t\tfinally:\n\t\t\t\t\tprint \"Prob(type={}|rating={}) = {}\".format(type, rating, percentage)", "def ANOVA_one_way(a):\r\n #a = array(a)\r\n group_means = []\r\n group_variances = []\r\n num_cases = 0 # total observations in all groups\r\n all_vals = []\r\n for i in a:\r\n num_cases += len(i)\r\n group_means.append(mean(i))\r\n group_variances.append(i.var(ddof=1) * (len(i) - 1))\r\n all_vals.extend(i)\r\n\r\n # Get within Group variances (denominator)\r\n dfd = num_cases - len(group_means)\r\n # need to add a check -- if the sum of the group variances is zero it will\r\n # error, but only if the between_Groups value is not zero\r\n within_Groups = sum(group_variances) / dfd\r\n if within_Groups == 0.:\r\n return nan, nan\r\n # Get between Group variances (numerator)\r\n all_vals = array(all_vals)\r\n grand_mean = all_vals.mean()\r\n between_Groups = 0\r\n for i in a:\r\n diff 
= i.mean() - grand_mean\r\n diff_sq = diff * diff\r\n x = diff_sq * len(i)\r\n between_Groups += x\r\n\r\n dfn = len(group_means) - 1\r\n between_Groups = between_Groups / dfn\r\n F = between_Groups / within_Groups\r\n return F, f_high(dfn, dfd, F)", "def analyze():\n user = Staff.is_login()\n if user is None:\n return redirect(url_for('auth.login'))\n\n pengusulans = Pengusulan.get_all()\n ranks = Pengusulan.calculate_averages(pengusulans)\n return render_template(\"pengusulan/analisa-table.html\", pengusulans=pengusulans, ranks=ranks, pengusulan_code=pengusulan_code, status=pengusulan_code.DIUSULKAN)", "def analyze_show():\n def mat_to_title(mat_file):\n mat_split = mat_file.split('_')\n while (mat_split.pop() not in ANALYSIS_METHODS):\n pass\n return string.join(mat_split,'_') + '*.mat'\n\n plotables = []\n for mat_file in Args.plotable_files:\n plotables.extend(\n [\n ((val.squeeze(),key), \"{0}: {1}\".format(mat_to_title(mat_file),key))\n for key,val in scipy.io.loadmat(mat_file).viewitems()\n if not (key.startswith('__') and key.endswith('__'))\n ]\n )\n ana_plot_graphs(*zip(*plotables),show=True)", "def run():\n\n import matplotlib.pyplot as plt\n\n anomalies_t = []\n anomalies_v = []\n anomalies_c = []\n\n all_t = []\n all_v = []\n\n rows = []\n for i, row in dataSet.iterrows():\n\n inputData = row.to_dict()\n\n detectorValues = handleRecord(inputData)\n\n if (detectorValues[0] > 0.65):\n anomalies_t.append(inputData[\"timestamp\"])\n anomalies_v.append(inputData[\"value\"])\n anomalies_c.append(detectorValues[0])\n\n all_t.append(inputData[\"timestamp\"])\n all_v.append(inputData[\"value\"])\n\n outputRow = list(row) + list(detectorValues)\n\n rows.append(outputRow)\n\n # Progress report\n if (i % 1000) == 0:\n print \".\",\n sys.stdout.flush()\n\n fig, ax = plt.subplots()\n\n ax.plot(all_t, all_v)\n ax.plot(anomalies_t, anomalies_v, 'ro')\n\n plt.show()\n\n ans = pandas.DataFrame(rows)\n return ans", "def main():\n # openfile allows for CSV files with stored data of two columns\n # data = openfile(\"filename\")\n data = get_data()\n abtest = AB_test(data)\n abtest.stats()\n abtest.print_stats()", "def technical_analysis(df, technical_analysis_choice):\n\n \n\n if technical_analysis_choice == \"Moving Average\":\n print(\"\\n---The Moving Average can define the main trend of the stock.---\\n\")\n print(\"......\")\n print(\"The system use 7 days and 21 day moving average as buy and sell signals.\\n\")\n print(\"Buy signal: The 7 days moving average is above the 21 day moving average.\\n\")\n print(\"Sell signal: The 7 days moving average is below the 21 day moving average.\\n\")\n print(\"......\")\n return moving_average_analysis(df)\n else:\n print(\"\\n---The Mean and Variance can identify the stock's historical performance---\\n\")\n print(\"......\")\n return mean_variance_analysis(df)" ]
[ "0.6850544", "0.6621147", "0.6502546", "0.6270873", "0.62652534", "0.6217322", "0.60624397", "0.6005953", "0.5985062", "0.59251034", "0.5853259", "0.58106", "0.5775162", "0.5741587", "0.57148474", "0.5647258", "0.5563191", "0.556098", "0.55328804", "0.55215967", "0.5488164", "0.547641", "0.5444261", "0.5425009", "0.5407884", "0.53991693", "0.53973114", "0.53828764", "0.5378897", "0.5377551" ]
0.7318326
0
test2 takes in the grosses data set and the rating data set, prepares the data, performs a logistic regression to test hypothesis 2, and prints out the regression results
# imports required by this snippet (assumed to sit at module level in the original source)
import pandas as pd
import numpy as np


def hypoTest2(df, rt):
    """Prepare the grosses and ratings data, fit a logistic regression of
    rating level on normalized average weekly gross to test hypothesis 2,
    then print the regression results and odds ratios."""
    from sklearn import preprocessing
    import statsmodels.api as sm

    gs = df
    ratings = rt
    # limit the time scope to the most recent 5 years
    testData2 = gs[gs['year'] >= 2015]
    testData2 = testData2[['show', 'year', 'month', 'this_week_gross']]
    # calculate the average weekly gross (mean) by show
    testData2['avg_weekly_gross'] = testData2.groupby('show')['this_week_gross'].transform('mean')
    testData2_1 = pd.merge(testData2, ratings, on='show')
    # select distinct shows
    testData2_1 = testData2_1.drop_duplicates('show')
    # select relevant columns
    testData2_1 = testData2_1[['show', 'avg_weekly_gross', 'total_rating']]
    testData2_1['ratingLevel'] = 0
    testData2_1.loc[(testData2_1.total_rating > 7), 'ratingLevel'] = 1
    # normalize avg_weekly_gross to [0, 1]
    mm_scaler = preprocessing.MinMaxScaler()
    mm_scaler.fit(testData2_1[['avg_weekly_gross']])
    testData2_1['norm_gross'] = mm_scaler.transform(testData2_1[['avg_weekly_gross']])
    # logistic regression: ratingLevel ~ norm_gross
    X = sm.add_constant(testData2_1['norm_gross'])
    logit1 = sm.Logit(testData2_1['ratingLevel'], X)
    result1 = logit1.fit()
    # summarize the results
    print(result1.summary())
    # get the odds ratios
    print()
    print("The odds ratios are as follows:")
    print()
    print(np.exp(result1.params))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_g2(self):\n self.setUp()\n theta = self.data.theta\n beta_0, beta_1 = theta[\"beta_0\"], theta[\"beta_1\"]\n gamma_0, gamma_1 = theta[\"gamma_0\"], theta[\"gamma_1\"]\n g2 = self.E_func.g2(self.S, gamma_0, beta_0, gamma_1, beta_1)\n # values of g2 at first group and first sample\n g2_0_1 = np.array([347/3, 326/3, 218/3, 266/3])\n # values of g2 at second group and second sample\n g2_1_3 = np.array([153 , 178.5, 151.5, 67.5])\n np.testing.assert_almost_equal(g2[0, 0], g2_0_1)\n np.testing.assert_almost_equal(g2[1, 1], g2_1_3)", "def trainRegressionModel(X,y):\n # # instantiate a logistic regression model, and fit with X and y\n # model = LogisticRegression()\n # model = model.fit(X, y)\n # # check the accuracy on the training set\n # print(model.score(X, y))\n #X['intercept'] = 1.0\n #del X['isCapitalized']\n #del X['isNN']\n #del X['isNNP']\n #del X['isJJ']\n #del X['isUpper']\n #del X['isPrecedingIN']\n logit = sm.Logit(y, X)\n result = logit.fit()\n print(result.summary())\n print(result.conf_int())\n model = LogisticRegression()\n model = model.fit(X, y)\n print(model.score(X, y))\n print(y.mean())\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n model2 = LogisticRegression()\n model2.fit(X_train, y_train)\n # predict class labels for the test set\n predicted = model.predict(X_test)\n print(predicted)\n for i in predicted:\n if i==1:\n print(\"Test:\"+str(i))\n print(max(predicted))\n #generate class probabilities\n probs = model2.predict_proba(X_test)\n print(probs)\n # generate evaluation metrics\n print(\"Accuracy: \"+str(metrics.accuracy_score(y_test, predicted)))\n print(\"AUC: \"+str(metrics.roc_auc_score(y_test, probs[:, 1])))\n print(metrics.confusion_matrix(y_test, predicted))\n print(metrics.classification_report(y_test, predicted))\n\n from sklearn.cross_validation import cross_val_score\n # evaluate the model using 10-fold cross-validation\n scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)\n print(scores)\n print(scores.mean())", "def create_logistic_regression():\n\n pause_data = shuffle(pd.read_csv(sys.argv[1]))\n pause_data = pause_data.replace([np.inf, -np.inf], np.nan).dropna()\n # X = pause_data.drop([HAS_DEMENTIA, TRANSCRIPT_ID], axis=1)\n X = pause_data[MEMORY_FEATURES]\n y = pause_data[HAS_DEMENTIA]\n split_tracker = []\n rskf = RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=36851234)\n # n_repeats 10 too\n for train_index, test_index in rskf.split(X, y):\n X_train, X_test = X.iloc[list(train_index)], X.iloc[list(test_index)]\n y_train, y_test = y.iloc[list(train_index)], y.iloc[list(test_index)]\n logmodel = LogisticRegression()\n logmodel.fit(X_train, y_train)\n predictions = logmodel.predict(X_test)\n split_tracker.append({\n TRAIN: train_index,\n TEST: test_index,\n PREDICTIONS: predictions,\n Y_TEST: y_test\n })\n accuracy = []\n f1 = []\n auc = []\n print(\"Predictions\", split_tracker[0])\n for predictions in split_tracker:\n # print(classification_report(predictions[Y_TEST], predictions[PREDICTIONS]))\n accuracy.append(accuracy_score(predictions[Y_TEST], predictions[PREDICTIONS]))\n f1.append(f1_score(predictions[Y_TEST], predictions[PREDICTIONS]))\n auc.append(roc_auc_score(predictions[Y_TEST], predictions[PREDICTIONS]))\n print(accuracy)\n accuracy = np.array(accuracy)\n f1 = np.array(f1)\n auc = np.array(auc)\n print(len(accuracy))\n print('mean accuracy: ', accuracy.mean())\n print('mean f1 score: ', f1.mean())\n print('mean auc: ', auc.mean())", "def 
learn2_sgd():\n\n sgd.fit(vector_training,sentiment_training) ##fits the training data of vector tweets and sentiments using SGDClassifier\n correct = 0\n for i in range(vector_testing.shape[0]): ##using the testing, data see how accurate SGDC is\n prediction = sgd.predict(vector_testing[i])\n sentiment = sentiment_testing[i]\n if prediction[0] == sentiment:\n correct +=1\n \n accuracy = correct/vector_testing.shape[0]\n print('Stochastic Gradient Descent Classifier Testing Accuracy: {:.2f}'.format(accuracy)) ##prints the accuracy of the algorithm", "def test_logistic_regression_c_parameter(params, X_train, X_test, y_train, y_test):", "def regression(features, scores, test_size=.3, save_path='results/',\n verbose=False, normalize=True, save_results=True):\n\n features, scores = shuffle(features, scores, random_state=0)\n\n if normalize:\n if verbose:\n print(\"[INFO] Normalizing Data\")\n scaler = StandardScaler()\n scaler.fit(features)\n features = scaler.transform(features)\n\n if verbose:\n print(\"[INFO] Spliting data into test and train\")\n\n x_train, x_test, y_train, y_test = train_test_split(features, scores, test_size=test_size)\n\n if verbose:\n print(\"[INFO] Setitng scores from competence 1 appart from the others\")\n y_train_c1 = y_train[:, 1]\n y_test_c1 = y_test[:, 1]\n\n if verbose:\n print(\"[INFO] Performing linear regression over the data\")\n reg = LinearRegression()\n reg.fit(x_train, y_train_c1)\n\n if verbose:\n print(\"[INFO] Computing the R2 score of the predictions\")\n\n predictions = reg.predict(x_test)\n\n mean_scores = y_test_c1.sum() / y_test_c1.shape[0]\n squared_sum_desired = ((y_test_c1 - mean_scores) ** 2).sum()\n squared_sum_regression = ((y_test_c1 - predictions) ** 2).sum()\n\n error = predictions - y_test_c1\n mean_error = error.sum() / predictions.shape[0]\n # standard deviation\n stdd = np.sqrt(((error - mean_error) ** 2).sum() / error.shape[0])\n\n R2_SCORE = 1 - squared_sum_regression / squared_sum_desired\n\n if verbose:\n print(\"[RESULT] R2 for a linear model: \", R2_SCORE)\n print(\"[RESULT] Desired squared sum: \", squared_sum_desired)\n print(\"[RESULT] Desired sum regression: \", squared_sum_regression)\n print(\"[RESULT] Mean error: \", mean_error)\n print(\"[RESULT] Error standard deviation: \", stdd)\n\n if save_results:\n if verbose:\n print(\"[INFO] Saving Results\")\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n\n with open(save_path + \"eval_regression.txt\", 'w') as f:\n string_output = \"R2 for a linear model: \" + str(R2_SCORE) + \" \\n\"\n string_output += \"Desired squared sum: \" + str(squared_sum_desired) + \" \\n\"\n string_output += \"Desired sum regression: \" + str(squared_sum_regression) + \" \\n\"\n string_output += \"Mean error: \" + str(mean_error) + \" \\n\"\n string_output += \"Error standard deviation: \" + str(stdd) + \" \\n\"\n f.write(string_output)\n f.close()\n\n return reg", "def main():\n args = parse_argument()\n train_file = args['train'][0]\n test_file = args['test'][0]\n print train_file, test_file\n \n user_ratings_train, movie_ratings_train=parse_file(train_file)\n ave_ratings=compute_average_user_ratings(user_ratings_train)\n user_train=list(user_ratings_train.keys())\n with open(test_file,'r') as test:\n with open('predictions.txt', 'w') as pred:\n writer = csv.writer(pred)\n prediction=list()\n actual=list()\n for row in csv.reader(test):\n num_sum=0.0\n sim_sum=0.0\n user=int(row[1])\n movie=int(row[0])\n other_users_ratings=movie_ratings_train[movie]\n 
other_users=other_users_ratings.keys()\n for i in range(len(other_users)): \n other_user=other_users[i]\n similar=compute_user_similarity(user_ratings_train[user],user_ratings_train[other_user],ave_ratings[user],ave_ratings[other_user])\n num_sum=num_sum+similar*(float(movie_ratings_train[movie][other_user])-float(ave_ratings[other_user]))\n sim_sum = sim_sum+abs(similar)\n #No similar users\n try:\n pred_rating=ave_ratings[user]+num_sum/sim_sum\n except ZeroDivisionError:\n pred_rating=ave_ratings[user]\n prediction.append(pred_rating)\n actual.append(row[2])\n writer.writerow(row+[pred_rating])\n actual_np=np.array(map(float, actual))\n prediction_np=np.array(prediction)\n rmse=np.sqrt(((prediction_np - actual_np)** 2).mean())\n mae=np.absolute(prediction_np - actual_np).mean()\n print \"RMSE\",round(rmse,4)\n print \"MAE\",round(mae,4)", "def LogisticRegression_self_test(X_train, X_test, y_train, y_test, learning_rates, epochs, iteration):\n\n\t# scoping number of training samples\n\n\tn_inputs = X_train.shape[0]\n\tn_features = X_train.shape[1]\n\n\t\n\n\teta_ = 1e-12\n\tbeta_opt = np.random.randn(X_train.shape[1], 2)\n\tcalc_beta_GD, norm = GradientDescent(X_train, beta_opt, y_train, iteration, eta_)\n\tprob_GD, predict_GD= Probability_GD(X_test, calc_beta_GD) #defining values to be between 0 and 1\n\t#yPred_GD = (predict_GD >= 0.5).astype(int) # converting to just 0 or 1\n\n\t#Define Logistic regression\n\tclf = LogisticRegression(solver='lbfgs', max_iter=1e5)\n\tclf = clf.fit(X_train, np.ravel(y_train))\n\tpred_sklearn = clf.predict(X_test)\n\tprob_sklearn = clf.predict_proba(X_test)\n\t#print(prob_sklearn)\n\n\t#for eta in np.logspace(np.log10(1e-6), np.log10(1e0), 7):\n\taccuracy = np.zeros(len(learning_rates))\n\tauc_score = np.zeros(len(learning_rates))\n\n\tfor i, eta in enumerate(learning_rates):\n\t\tbeta_SGD = stochastic_gradient_descent(X_train, beta_opt, y_train, eta, epochs, iteration)\n\t\tprob_SGD, predict_SGD= Probability(X_test, beta_SGD) #defining values to be between 0 and 1\n\t\t\n\t\t\n\t\taccuracy[i] = metrics.accuracy_score(y_test, predict_SGD)\n\t\tauc_score[i] = metrics.roc_auc_score(y_test, predict_SGD)\n\t\tdifference = y_test - predict_SGD\n\n\t\t\n\n\t\tif i> 0 and auc_score[i] > auc_score[i-1]:\n\t\t\tbest_pred_SGD= predict_SGD\n\t\t\tbest_prob_SGD = prob_SGD\n\t\n\n\t\tprint('Accuracy {}, learning rate= {}, iterations = {}'.format(accuracy[i], eta, iteration))\n\t\n\t\tprint('Auc score: {}'.format(auc_score[i]))\n\n\n\t\t\"\"\"\n\t\tplt.plot(yPred, label='predict')\n\t\tplt.plot(optimal_beta, label ='optimal beta')\n\t\tplt.plot(y_test, label='test')\n\t\tplt.show()\n\t\t\"\"\"\n\n\tsns.set()\n\tsns.heatmap(pd.DataFrame(accuracy), annot= True, fmt='.4g')\n\tplt.title('Grid-search for logistic regression')\n\tplt.ylabel('Learning rate: $\\\\eta$')\n\tplt.xlabel('Regularization Term: $\\\\lambda$')\n\t#plt.xticks(ticks=np.arange(len(learning_rates)) + 0.5, labels=learning_rates)\n\t#plt.yticks(ticks=np.arange(len(lambda_values)) + 0.5, labels=lambda_values)\n\tb, t = plt.ylim() # discover the values for bottom and top\n\tb += 0.5 # Add 0.5 to the bottom\n\tt -= 0.5 # Subtract 0.5 from the top\n\tplt.ylim(b, t) # update the ylim(bottom, top) values\n\t#plt.savefig('accuracy_logreg.png')\n\tplt.show()\n\n\tsns.heatmap(pd.DataFrame(auc_score), annot= True, fmt='.4g')\n\tplt.title('Grid-search for logistic regression')\n\tplt.ylabel('Learning rate: $\\\\eta$')\n\tplt.xlabel('Regularization Term: 
$\\\\lambda$')\n\t#plt.xticks(ticks=np.arange(len(learning_rates)) + 0.5, labels=learning_rates)\n\t#plt.yticks(ticks=np.arange(len(lambda_values)) + 0.5, labels=lambda_values)\n\tb, t = plt.ylim() # discover the values for bottom and top\n\tb += 0.5 # Add 0.5 to the bottom\n\tt -= 0.5 # Subtract 0.5 from the top\n\tplt.ylim(b, t) # update the ylim(bottom, top) values\n\t#plt.savefig('auc_score_logreg.png')\n\tplt.show()\n\n\t#plot confusion matrix\n\tConfusion_Matrix(y_test, predict_GD)\n\t#Confusion_Matrix(y_test, best_pred_SGD)\n\t#Confusion_Matrix(y_test, pred_sklearn)\n\n\t#diff = np.concatenate((1- predict, predict), axis=1)\n\n\tdiff_sklearn = np.concatenate((1- prob_sklearn, prob_sklearn), axis=1)\n\tdiff_GD = np.concatenate((1- prob_GD, prob_GD), axis=1)\n\tdiff_SGD = np.concatenate((1- best_prob_SGD, best_prob_SGD), axis=1)\n\n\t#plot roc curves\n\tplot_roc(y_test, prob_sklearn)\n\tplot_roc(y_test, diff_SGD)\n\tplot_roc(y_test, prob_GD)\n\tplt.show()\n\n\t#plot cumulative gain curves\n\tplot_cumulative_gain(y_test, prob_sklearn)\n\tax = plot_cumulative_gain(y_test, diff_SGD)\n\tplot_cumulative_gain(y_test, prob_GD)\n\t#plt.show()\n\n\n\n\t\"\"\"\n\t#plot roc curves\n\tplot_roc(y_test, diff_sklearn, plot_micro=False, plot_macro= False)\n\tplot_roc(y_test, diff_GD, plot_micro=False, plot_macro= False)\n\tplot_roc(y_test, diff_SGD, plot_micro=False, plot_macro= False)\n\tplt.show()\n\n\t#plot cumulative gain curves\n\tplot_cumulative_gain(y_test, diff_sklearn)\n\tplot_cumulative_gain(y_test, diff_GD)\n\tplot_cumulative_gain(y_test, diff_SGD)\n\tplt.show()\t\n\n\t\"\"\"\n\n\tmodel_curve = auc_score\n\tarea_baseline = 0.5\n\tarea_ratio = (model_curve - area_baseline)/(area_baseline)\n\tprint('Area Ratio:',area_ratio)\n\n\n\treturn accuracy, learning_rates", "def test():\n X,Xval,Yval = _load_sample_data()\n mu,var = estimate_gaussian_params(X)\n pval = get_probability(Xval,mu,var)\n\n figure()\n plot(X[:,0],X[:,1],'b+',label='data'); xlabel(\"Latency (ms)\"); ylabel(\"Throughput (Mb/s)\")\n epsilon, F1 = determine_threshold(Yval,pval)\n print(\"Optimal epsilon and F1 score for sample dataset {}, {}\".format(epsilon, F1))\n plot_gaussian(mu,var,epsilon=epsilon)\n\n ## Plot Outliers\n predictions = get_probability(X,mu, var)\n outliers = X[predictions < epsilon]\n plot(outliers[:,0],outliers[:,1],'ro',mfc=None,label='outliers');\n legend()\n grid()", "def process_data(self):\n y = self.df['gross']\n x = self.df['imdb_score']\n\n # plt.scatter(x, y, color='blue', label=\"data\")\n # plt.xlabel(\"imdb_score\")\n # plt.ylabel(\"gross\")\n\n # need to fit an exponential data set\n popt, pcov = curve_fit(func, x, y)\n # popt is parameters\n # X = np.arange(0.0, 10.0, 0.1)\n # plt.plot(X, func(X, popt[0], popt[1], popt[2]), 'r-', label=\"fit\")\n # plt.legend(loc=\"best\")\n # plt.show()\n\n # metrics.accuracy_score for accuracy\n acc = r2_score(y, func(x, popt[0], popt[1], popt[2]))\n\n return {\"param\": popt, \"acc\": acc}", "def run_regression(train_embeds, train_labels, test_embeds, test_labels):\n np.random.seed(1)\n from sklearn.linear_model import SGDClassifier\n from sklearn.dummy import DummyClassifier\n from sklearn.metrics import f1_score\n dummy = DummyClassifier()\n dummy.fit(train_embeds, train_labels)\n log = SGDClassifier(loss=\"log\", n_jobs=10, tol=1e-3)\n log.fit(train_embeds, train_labels)\n print(\"F1 score:\", f1_score(test_labels, log.predict(test_embeds), average=\"micro\"))\n print(\"Random baseline f1 score:\", f1_score(test_labels, dummy.predict(test_embeds), 
average=\"micro\"))", "def analysis(houses:pd.DataFrame) -> None:\n \n \"\"\"\n #Me just trying to fit the data without any outside influences\n f= f'SELLER_HOUSE ~ SQFT_PER + PRICE + C(LOCATION)' \n result= smf.logit(formula= str(f), data= houses).fit()\n print(result.summary2())\n y= ['SELLER_HOUSE']\n x= ['SQFT_PER', 'PRICE', 'LOC_699 - Not Defined', 'LOC_AA - Airport Area', 'LOC_CG - Columbus Grove',\n 'LOC_CV - Cypress Village', 'LOC_EASTW - Eastwood', 'LOC_EC - El Camino Real', 'LOC_GP - Great Park',\n 'LOC_IRSP - Irvine Spectrum', 'LOC_LGA - Laguna Altura', 'LOC_NK - Northpark', 'LOC_NW - Northwood', \n 'LOC_OC - Oak Creek', 'LOC_OH - Orchard Hills', 'LOC_OT - Orangetree', 'LOC_PS - Portola Springs', \n 'LOC_QH - Quail Hill', 'LOC_SH - Shady Canyon', 'LOC_SJ - Rancho San Joaquin', 'LOC_STG - Stonegate', \n 'LOC_Stonegate', 'LOC_TR - Turtle Rock', 'LOC_TRG - Turtle Ridge', 'LOC_UP - University Park',\n 'LOC_UT - University Town Center', 'LOC_WB - Woodbridge', 'LOC_WD - Woodbury', \n 'LOC_WI - West Irvine', 'LOC_WN - Walnut (Irvine)', 'LOC_WP - Westpark']\n x_train, x_test, y_train, y_test= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train, y_train.values.ravel())\n y_pred= logreg.predict(x_test)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test, y_test), 3))\n # This model is really bad\n \n \"\"\"\n \n \"\"\n houses= houses.drop(['DAYS_ON_MARKET', 'ADDRESS', 'LOCATION',\n 'STATUS', 'PROPERTY_TYPE', 'ZIP_CODE'], axis= 1)\n columns= houses.columns.values.tolist()\n y= ['SELLER_HOUSE']\n x= [i for i in columns if i not in y]\n \n # Over Sampling Using SMOTE \n x_train, _, y_train, _= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n x_columns= x_train.columns\n \n os= SMOTE(random_state= 0)\n os_x, os_y= os.fit_sample(x_train, y_train)\n os_x= pd.DataFrame(data= os_x, columns= x_columns)\n os_y= pd.DataFrame(data= os_y, columns= y)\n \n \n #Recursive Feature Elimination\n logreg= LogisticRegression(max_iter= 600)\n rfe= RFE(logreg, 20)\n rfe= rfe.fit(os_x, os_y.values.ravel())\n \n lst= [i for count, i in enumerate(x) if rfe.support_[count] == True]\n X= os_x[lst]\n Y= os_y['SELLER_HOUSE']\n \n \n #logit_model= sm.Logit(Y, X)\n #result= logit_model.fit()\n #print(result.summary2()) # Model choosen by RCE\n \n #These are features have a p-value less than 0.05\n final_x= ['BATHS', 'ZIP_92602.0', 'ZIP_92618.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n #final_x= ['ZIP_92602.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n X2= os_x[final_x]\n \n logit_model2= sm.Logit(Y, X2)\n result2= logit_model2.fit()\n print(result2.summary2()) # Final Model\n \n x_train2, x_test2, y_train2, y_test2= train_test_split(X2, Y, test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train2, y_train2)\n \n y_pred= logreg.predict(x_test2)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test2, y_test2), 2))\n \n conf_matrix= confusion_matrix(y_test2, y_pred)\n print(conf_matrix)\n # So 22+61 correct predictions and 13+44 wrong predictions\n \n logit_roc_auc = roc_auc_score(y_test2, logreg.predict(x_test2))\n fpr, tpr, _ = roc_curve(y_test2, logreg.predict_proba(x_test2)[:,1])\n plt.figure()\n plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)\n plt.plot([0, 1], [0, 1],'r--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 
1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()\n \"\"", "def fit_logistic_regression():\n\n logger.debug(\"Running the fit_logistic_regression function now\")\n\n #Loading the configuration\n with open(os.path.join(\"config\",\"config.yml\"), \"r\") as f:\n config = yaml.safe_load(f)\n\n #Loading and pre processing the data\n logger.debug(\"Loading and pre processing the data\")\n train_df = load_data(config[\"load_data\"][\"train_file\"])\n train_df = pre_process_data(train_df, resample = True, resample_count = 500000)\n\n #Defining Pipeline\n pipeline = Pipeline([\n ('tfidf', TfidfVectorizer(analyzer='word', token_pattern=r'[A-Za-z0-9@-]+')),\n ('model', LogisticRegression(random_state=12345, verbose = 1, solver = 'saga')),\n ])\n\n #Defining parameters to vary\n parameters = {\n 'tfidf__max_df': (0.25, 0.5, 0.75),\n 'tfidf__max_features': (None, 5000, 10000, 50000),\n 'tfidf__ngram_range': ((1, 1), (1, 2)),\n 'model__C': (0.01, 1, 100)\n }\n\n scoring_list = [\"accuracy\", \"f1\", \"precision\", \"recall\", \"roc_auc\"]\n \n #Performing 5fold CV to determine best hyperparameters\n model = GridSearchCV(pipeline, parameters, cv=5,\n n_jobs=-1, verbose=1, scoring=scoring_list, refit='f1',)\n\n t0 = datetime.datetime.now()\n\n model.fit(train_df[\"Review\"].tolist(), train_df[\"Ratings\"].to_numpy())\n \n logger.info(\"Grid Search performed in {}\".format(str(datetime.datetime.now()-t0)))\n\n #Saving results\n res_df = pd.DataFrame(model.cv_results_)\n res_df.to_csv(os.path.join(config[\"summary_stats\"][\"save_location\"], \"LogisticRegressionResults.csv\"))\n \n #Saving the model\n pickle.dump(model, open(os.path.join(config[\"models\"][\"save_location\"], \"LogisticRegression.pkl\"),'wb'))\n\n return", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = 
sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def test_sklearn_log_loss(self):\n X, y = datasets.load_breast_cancer(return_X_y=True)\n\n star = sb.BoostingClassifier(\n loss=sb.losses.LogLoss(),\n init_estimator=ScikitLearnLogOdds(),\n base_estimator=tree.DecisionTreeRegressor(max_depth=3, random_state=42),\n base_estimator_is_tree=True,\n n_estimators=30,\n learning_rate=0.1,\n )\n star = star.fit(X, y)\n\n scikit = ensemble.GradientBoostingClassifier(\n loss='deviance',\n max_depth=3,\n n_estimators=30,\n learning_rate=0.1,\n random_state=42\n )\n scikit = scikit.fit(X, y)\n\n for y1, y2 in zip(star.iter_predict_proba(X), scikit.staged_predict_proba(X)):\n np.testing.assert_allclose(y1, y2, rtol=1e-5)", "def evaluate(self, X_test, y_test):\n y_pred_train = self.pipeline.predict(self.X)\n mse_train = mean_squared_error(self.y, y_pred_train)\n rmse_train = np.sqrt(mse_train)\n \n self.mlflow_log_metric('rmse_train', rmse_train)\n \n y_pred_test = self.pipeline.predict(X_test)\n mse_test = mean_squared_error(y_test, y_pred_test)\n rmse_test = np.sqrt(mse_test)\n self.mlflow_log_metric('rmse_test', rmse_test)\n \n return (round(rmse_train, 3) ,round(rmse_test, 3))", "def eg2(r_train, r_test, N_train=1000, N_test=500):\n\n def eg2_kernel(r, N):\n X1 = np.random.randn(N)\n X2_1 = np.exp(X1) + 0.1 * np.random.randn(N) # add noise or not?\n X2_2 = np.random.randn(N)\n X2_prob = np.random.uniform(0, 1, N)\n X2 = np.where(X2_prob < r, X2_1, X2_2)\n X3 = np.random.randn(N)\n X4 = np.random.randn(N)\n Y = 210 + 27.4 * X1 + 13.7 * X3 + 13.7 * X4 + np.random.randn(N)\n\n data = {}\n data['X1'] = X1\n data['X2'] = X2\n data['X3'] = X3\n data['X4'] = X4\n data['Y'] = Y\n return data\n\n data_train = eg2_kernel(r_train, N_train)\n data_test = eg2_kernel(r_test, N_test)\n\n return data_train, data_test", "def experiment1_outliers():\n\tdata_folder = \"ckan_subset/prepared_learnset/\"\n\ttest_folder = 'ckan_subset/testset/xml_csv/'\n\tgm = Graph_Maker()\n\tgm.store()\n\trounds = 5\n\tx = [\"Fingerprint\", \"Syntax Feature Model\", \"Word2Vec Matcher\"]\n\t\n\tnumber_of_classes = 15\n\texamples_per_class = 0\n\taccuracies = []\n\tprecisions = []\n\trecalls = []\n\tfmeasures = []\n\tsf_main = Storage_Files(data_folder, classes)\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\n\tfor i in range(0, rounds):\n\t\tprint(\"Fingerprint\")\n\t\t# --- Fingerprint\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Fingerprint', [sf_main, number_of_classes, examples_per_class, False, False])\n\n\t\tccc.add_matcher('matcher', 'Fingerprint_Matcher', {'feature_main': 'fingerprint'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, 
predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\t\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\tfor i in range(0, rounds):\n\t\tprint(\"SFM\")\n\t\t# --- Syntax Feature Model\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Syntax_Feature_Model', [sf_main, 1, 0, False, False])\n\n\t\tccc.add_matcher('matcher', 'Syntax_Matcher', {'feature_main': 'syntax'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\tfor i in range(0, rounds):\n\t\tprint(\"W2V\")\n\t\t# --- Word2Vec Matcher\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Corpus', [sf_main, number_of_classes, examples_per_class, False, False])\n\n\t\tccc.add_matcher('matcher', 'Word2Vec_Matcher', {'feature_main': 'corpus'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\n\tgm.add_x(x)\n\t# accuracies = [0.4, 0.4, 0.4]\n\t# precisions = [0.5, 0.5, 0.5]\n\t# recalls = [0.62, 0.62, 0.62]\n\t# fmeasures = [0.23, 0.23, 
0.28]\n\tgm.append_y(accuracies)\n\tgm.append_y(precisions)\n\tgm.append_y(recalls)\n\tgm.append_y(fmeasures)\n\tgm.store()\n\tsubtitle = \"Scores were averaged over \" + str(rounds) + \" tests with \" + str(len(classes)) + \" classes. \" + \\\n\t\"Number of simulated columns per class: \" + str(number_of_classes)\n\tlabels = [\"Accuracy\", \"Precision\", \"Recall\", \"F-Measure\"]\n\tgm.plot_bar_n(\"Matcher Type\", \"Score\", \"Accuracy of Matchers\", labels, subtitle=subtitle)", "def r2_score(self):\n print('R^2 (coefficient of determination) regression score function: ' +\n str(r2_score(self.model.dataset.get_y_test(), self.model.get_predicted())))", "def test_setup_log_reg_classifier(self):\n \n\n model ,vec, x_testing=setup_log_reg_classifier(self.training_data, self.training_y, self.testing_data,\"text\", method=\"count\")\n \n model2 ,vec_tfidf, x_testing2=setup_log_reg_classifier(self.training_data, self.training_y, self.testing_data,\"text\", method=\"tfidf\")\n \n \n \"\"\" Test correct data types for countVectorizer\"\"\" \n \n self.assertIsInstance(vec,\n sklearn.feature_extraction.text.CountVectorizer)\n \n self.assertIsInstance(x_testing, scipy.sparse.csr.csr_matrix)\n \n self.assertIsInstance(model, sklearn.linear_model.LogisticRegression)\n \n \"\"\" Test correct data types TfidfVectorizer\"\"\" \n \n self.assertIsInstance(vec_tfidf,\n sklearn.feature_extraction.text.TfidfVectorizer)\n \n self.assertIsInstance(x_testing2, scipy.sparse.csr.csr_matrix)\n \n self.assertIsInstance(model2, sklearn.linear_model.LogisticRegression)\n \n \n \"\"\" Test correct behaviour for wrong method\"\"\" \n \n self.assertTrue(setup_log_reg_classifier(self.training_data, self.training_y, self.testing_data,\"text\", method=\"ijfsiohf\"),\n 1)", "def r2_score(y_true, y_pred, *, sample_weight=..., multioutput=...):\n ...", "def test(self, training_set, original_test_set, imitation_test_set ):\n\n plt.figure()\n\n training_axis = np.arange(len(training_set))\n original_test_axis = np.arange(len(original_test_set))+len(training_axis)\n imitation_test_axis = np.arange(len(imitation_test_set))+len(training_axis)+len(original_test_set)\n\n training_scores = []\n original_test_scores = []\n imitation_test_scores = []\n\n for signature in training_set:\n vectorized_signature = signature.tolist()\n score = -1*self.model.score(vectorized_signature)\n training_scores.append(score)\n\n for signature in original_test_set:\n vectorized_signature = signature.tolist()\n score = -1*self.model.score(vectorized_signature)\n original_test_scores.append(score)\n\n for signature in imitation_test_set:\n vectorized_signature = signature.tolist()\n score = -1*self.model.score(vectorized_signature)\n imitation_test_scores.append(score)\n\n accuracy, threshold = self.evaluate(training_scores, original_test_scores, imitation_test_scores)\n\n xaxis = np.arange(len(imitation_test_set)+len(training_axis)+len(original_test_set))\n plt.plot( xaxis, threshold*np.ones(len(xaxis)), \"--\", label=\"Threshold\" )\n plt.scatter(training_axis,training_scores, label=\"Training data\")\n plt.scatter(original_test_axis, original_test_scores, c=\"g\", label=\"Original Test data\")\n plt.scatter(imitation_test_axis, imitation_test_scores, c=\"r\", label=\"Imitated Test data\")\n plt.legend(loc=\"best\")\n plt.title(f\"{self.user} data. 
Accuracy={accuracy} \")\n plt.ylabel(\"Score\")\n plt.xlabel(\"File\")\n plt.savefig(f\"{self.n_components}_{self.user}.png\")\n\n Model.accuracies.append(accuracy)", "def part2():\n\tX, Xval, yval = loadDataSet('ex8data2.mat')\n\tmu, sigma2 = estimateGaussian(X)\n\tp = multivariateGaussian(X, mu, sigma2)\n\tpval = multivariateGaussian(Xval, mu, sigma2)\n\tepsilon, F1 = selectThreshold(yval, pval)\n\n\tprint('Best epsilon found using cross-validation: %e\\n' % (epsilon))\n\tprint('Best F1 on Cross Validation Set: %f\\n' % (F1))\n\tprint('# Outliers found: %d\\n' % (np.sum(p < epsilon)))\n\tprint('(you should see a value epsilon of about 1.38e-18)\\n\\n')", "def model_performance_comparison(self, yvar, prev_group, C_FP, C_FN):\n trn_bl_df = self.reweigh()\n # sample_weights = self.reweigh(bl_df=trn_bl_df)\n\n s_weights = trn_bl_df.instance_weights\n print(s_weights)\n\n trainset = trn_bl_df.convert_to_dataframe()[0]\n testset = trainset\n\n X = trainset.loc[:, trainset.columns != yvar]\n y = trainset[yvar]\n #X_test = testset.loc[:, trainset.columns != yvar]\n #y_test = testset[yvar]\n\n X_test = X\n y_test = y\n\n clf_ww = sklearn.linear_model.LogisticRegression(random_state=999).fit(X, y, sample_weight=s_weights)\n clf_wow = sklearn.linear_model.LogisticRegression(random_state=999).fit(X, y)\n\n output_probabilities_to_csv(model=clf_ww, x_test=X_test, path='probs_ww_withprvgroup.csv',priv_group_col=trainset[prev_group], actuals=y_test)\n output_probabilities_to_csv(model=clf_wow, x_test=X_test, path='probs_wow_withprvgroup.csv', priv_group_col=trainset[prev_group], actuals=y_test)\n\n print(\"------------------------------------------\")\n print(\"Accuracy of Vanila Logistic Model\")\n print(\"------------------------------------------\")\n print(\"Without Weights : \", round(clf_wow.score(X_test, y_test), 3))\n print(\"With Weights : \", round(clf_ww.score(X_test, y_test), 3))\n\n X_test_age1 = testset.loc[:, trainset.columns != yvar][testset[prev_group] == 1.0]\n y_test_age1 = testset[yvar][testset[prev_group] == 1.0]\n X_test_age0 = testset.loc[:, trainset.columns != yvar][testset[prev_group] == 0.0]\n y_test_age0 = testset[yvar][testset[prev_group] == 0.0]\n\n wow = round(abs(clf_wow.score(X_test_age0, y_test_age0) - clf_wow.score(X_test_age1, y_test_age1)), 3)\n ww = round(abs(clf_ww.score(X_test_age0, y_test_age0) - clf_ww.score(X_test_age1, y_test_age1)), 3)\n\n #output_probabilities_to_csv(model=clf_ww, x_test=X_test_age0, path='probs_unpriv_ww.csv')\n #output_probabilities_to_csv(model=clf_ww, x_test=X_test_age1, path='probs_priv_ww.csv')\n #output_probabilities_to_csv(model=clf_wow, x_test=X_test_age0, path='probs_unpriv_wow.csv')\n #output_probabilities_to_csv(model=clf_wow, x_test=X_test_age1, path='probs_priv_wow.csv')\n\n print(\"\")\n print(\"\")\n print(\"--------------------------------------------------------------\")\n print(\"Difference in accuracy between privileged and unprivileged\")\n print(\"--------------------------------------------------------------\")\n print(\"without weights : \", wow)\n print(\"with weights : \", ww)\n\n Ypredclf = clf_ww.predict(X_test)\n Ypredclf2 = clf_wow.predict(X_test)\n withw = confusion_matrix(y_test, Ypredclf)\n without = confusion_matrix(y_test, Ypredclf2)\n print(\"\")\n print(\"\")\n print(\"--------------------------------------------------------------\")\n print(\"Confusion Matrix\")\n print(\"--------------------------------------------------------------\")\n print(\"without weights\")\n print(without)\n print(\"\")\n 
print(\"\")\n print(\"with weights\")\n print(withw)\n\n a, b, c, d = without.ravel() #(tn, fp, fn, tp)\n a1, b1, c1, d1 = withw.ravel() #(tn, fp, fn, tp)\n\n withweights = b1 * C_FP + c1 * C_FN\n withoutweights = b * C_FP + c * C_FN\n\n print(\"\")\n print(\"\")\n print(\"cost with weights: \", withweights)\n print(\"cost without weights: \", withoutweights)\n print(\"Has cost decreased after reweighing?\", withweights < withoutweights)\n\n print('')\n print('SUMMARY TABLE')\n\n cost = fr.CostingFairness(input_dataframe=self.data,\n label_names=['credit'],\n protected_attribute_names=['Age_previliged'],\n trained_model=clf_ww)\n\n metrics_table = self.generate_pre_train_metrics_table(model_without_weights=clf_wow,\n model_with_weights=clf_ww,\n test_set=testset,\n target=yvar,\n privileged=prev_group,\n false_positive_cost=C_FP,\n false_negative_cost=C_FN)\n priv_diff_table = generate_privileged_diff(metrics_table)\n delta_table = generate_delta_table(metrics_table)\n costs_table = cost.return_cost_fairness_accuracy_optimised()\n\n # pdf = PDF()\n # pdf.add_page()\n # pdf.write_table_to_pdf(metrics_table)\n # pdf.write_table_to_pdf(priv_diff_table)\n # pdf.write_table_to_pdf(delta_table)\n # pdf.output('TEST01.pdf', 'F')\n\n print(\"\")\n print(\"What we see is interesting, after re-weighing the bias of the model has decreased significantly by {}%, \"\n \"with a very slight decrease in accuracy as shown earlier\".format(round((wow - ww) * 100)))\n\n return metrics_table, priv_diff_table, delta_table, costs_table", "def fit_test(self):", "def test_G_2_by_2_1tailed_examples(self):\r\n # first up...the famous arginine case\r\n self.assertFloatEqualAbs(G_2_by_2(36, 16, 38, 106), (29.111609, 0),\r\n 0.00001)\r\n # then some other miscellaneous positive and negative values\r\n self.assertFloatEqualAbs(\r\n G_2_by_2(0, 52, 12, 132), (-7.259930, 0.996474),\r\n 0.00001)\r\n self.assertFloatEqualAbs(\r\n G_2_by_2(5, 47, 14, 130), (-0.000481, 0.508751),\r\n 0.00001)\r\n self.assertFloatEqualAbs(\r\n G_2_by_2(5, 47, 36, 108), (-6.065167, 0.993106),\r\n 0.00001)", "def compare_coefficients1():\n directory = \"C:\\\\Users\\\\Casper\\\\Projects\\\\MasterScriptie\\\\custom_projects\\\\editing\\\\PHT_Preprocessing\\\\out\\\\{}\\\\data.csv\".format(dataset)\n\n X = pd.read_csv(directory)[var_list].to_numpy()[:datapoints_amount]\n y = np.squeeze(pd.read_csv(directory)[target_list].to_numpy())[:datapoints_amount]\n \n X_mean = np.mean(X, axis=0)\n X_std = np.std(X, axis=0)\n \n X_standardized = standardize(X, X_mean, X_std)\n \n model = LogisticRegression().fit(X, y) \n model_standardized = LogisticRegression().fit(X_standardized, y) \n \n print(\"coefficients \", model.coef_)\n print(\"beta coefficients \", model_standardized.coef_)\n \n for tuple_ in zip(model.coef_[0], X_std):\n standardized_coef = unstd_to_std_coef2_log(*tuple_)\n print(standardized_coef)\n \n for tuple_ in zip(model_standardized.coef_[0], X_std):\n unstd_coef = std_to_unstd_coef_log(*tuple_)\n print(unstd_coef)\n \n print(\"\\nintercept \", model.intercept_)\n print(\"coef \", unstd_coef)\n print(\"xmean \", X_mean)", "def compare():\n from sklearn import datasets\n from sklearn import linear_model\n from sklearn.metrics import mean_squared_error\n\n boston_data = datasets.load_boston()\n X, y = boston_data.data, boston_data.target\n linreg = linear_model.LinearRegression()\n lr_model = linreg.fit(X ,y)\n lr_mse = mean_squared_error(lr_model.predict(X), y)\n print 'Linear regression:', lr_mse\n\n ridge = linear_model.Ridge()\n 
ridge_model = ridge.fit(X, y)\n ridge_mse = mean_squared_error(ridge.predict(X), y)\n print 'Ridge regression:', ridge_mse\n\n lasso = linear_model.Lasso()\n lasso_model = lasso.fit(X, y)\n lasso_mse = mean_squared_error(lasso_model.predict(X), y)\n print 'Lasso regression:', lasso_mse", "def main():\n users = [i.id for i in list(User.select())]\n sample_users = random.sample(users, _SAMPLE_NUMBER)\n actual_result = []\n average_result = []\n nearest_neighbour_result = []\n slope_one_result = []\n hybird_result = []\n for user_id in sample_users:\n print('Current user:', get_user_by_id(user_id))\n movie_id = random.choice(get_movie_rating_by_user(user_id)).movie_id\n print('Current movie:', get_movie_by_id(movie_id))\n actual = get_user_movie_rating(user_id, movie_id)\n print('Actual Rating:', actual)\n actual_result.append(actual)\n avg = average_rating(movie_id, True)\n print('Average Rating:', avg)\n average_result.append(avg)\n nearest = nearest_neighbour(user_id, movie_id, True)\n print('Nearest Neighbour Rating:', nearest)\n nearest_neighbour_result.append(nearest)\n slope = slope_one(user_id, movie_id, True)\n print('Slope One Rating:', slope)\n slope_one_result.append(parse_result(slope))\n hybrid = hybrid_algorithm(avg, nearest, slope, True)\n print('Hybrid Algorithm Rating:', hybrid)\n hybird_result.append(parse_result(hybrid))\n print()\n\n if _DEBUG:\n print(actual_result)\n print(average_result)\n print(nearest_neighbour_result)\n print(slope_one_result)\n print(hybird_result)\n\n print('RMSD of each recommender system')\n print(' Average Rating '.center(80, '#'))\n print(rmsd(actual_result, average_result))\n print(' Nearest Neighbour '.center(80, '#'))\n print(rmsd(actual_result, nearest_neighbour_result))\n print(' Slope One '.center(80, '#'))\n print(rmsd(actual_result, slope_one_result))\n print(' Hybrid Algorithm '.center(80, '#'))\n print(rmsd(actual_result, hybird_result))", "def test_class():\n riskfree = .03\n maturity = 30/365\n moneyness = np.linspace(-.04, .04, 10)\n premium = np.ones_like(moneyness) * .05\n call = True\n data = {'riskfree': riskfree, 'maturity': maturity,\n 'moneyness': moneyness, 'call': call, 'premium': premium}\n\n sigma = .13\n bsm = BSmodel(sigma, data)\n\n weights = [.63]\n means = [-.01, .09]\n stds = [.16, .05]\n param = weights + means + stds\n mbs = MBSmodel(param, data)\n\n param_a, param_p, param_c = 4, 1.5, -.05\n gb2 = GB2model([param_a, param_p, param_c], data)\n print(gb2.get_pnames())\n\n plt.figure()\n for model in [bsm, mbs, gb2]:\n plt.plot(moneyness, model.density(moneyness), label=model.get_name())\n plt.legend()\n plt.show()\n\n plt.figure()\n for model in [bsm, mbs, gb2]:\n plt.plot(moneyness, model.premium(), label=model.get_name())\n plt.legend()\n plt.show()\n\n plt.figure()\n for model in [bsm, mbs, gb2]:\n plt.plot(moneyness, model.impvol(), label=model.get_name())\n plt.legend()\n plt.show()\n\n print('BS objective function = %.4f' % bsm.objective(sigma))\n print('GB2 objective function = %.4f'\n % gb2.objective([param_a, param_p, param_c]))" ]
[ "0.66347843", "0.6209006", "0.619178", "0.6048459", "0.599496", "0.59430534", "0.5936024", "0.59307", "0.5913007", "0.5906161", "0.587387", "0.58414817", "0.58316684", "0.5810793", "0.5808976", "0.58022356", "0.5780662", "0.57751703", "0.5768546", "0.57460725", "0.5743792", "0.57420045", "0.5735027", "0.57275873", "0.57221603", "0.569569", "0.5694835", "0.56745887", "0.5657737", "0.565169" ]
0.6988483
0
Add model parameters in the modelica file and create a dictionary of model parameters. This function extracts model and subckt information along with their parameters with the help of optionInfo.
def addModel(self,optionInfo): modelName = [] modelInfo = {} subcktName = [] paramInfo = [] transInfo = {} for eachline in optionInfo: words = eachline.split() if words[0] == '.include': name = words[1].split('.') if name[1] == 'lib': modelName.append(name[0]) if name[1] == 'sub': subcktName.append(name[0]) elif words[0] == '.param': paramInfo.append(eachline) elif words[0] == '.model': model = words[1] modelInfo[model] = {} eachline = eachline.replace(' = ','=').replace('= ','=').replace(' =','=') eachline = eachline.split('(') templine = eachline[0].split() trans = templine[1] transInfo[trans] = [] if templine[2] in ['npn', 'pnp', 'pmos', 'nmos']: transInfo[trans] = templine[2] eachline[1] = eachline[1].lower() eachline = eachline[1].split() for eachitem in eachline: if len(eachitem) > 1: eachitem = eachitem.replace(')','') iteminfo = eachitem.split('=') for each in iteminfo: modelInfo[model][iteminfo[0]] = iteminfo[1] #Adding details of model(external) and subckt into modelInfo and subcktInfo print "Model Name ------------ >",modelName for eachmodel in modelName: filename = eachmodel + '.lib' if os.path.exists(filename): try: f = open(filename) except: print("Error in opening file") sys.exit() else: print filename + " does not exist" sys.exit() data = f.read() data = data.replace('+', '').replace('\n','').replace(' = ','=').replace('= ','=').replace(' =','=') #data = data.lower() #Won't work if Reference model name is Upper Case newdata = data.split('(') templine_f = newdata[0].split() trans_f = templine_f[1] transInfo[trans_f] = [] if templine_f[2] in ['npn', 'pnp', 'pmos', 'nmos']: transInfo[trans_f] = templine_f[2] refModelName = trans_f newdata[1] = newdata[1].lower() modelParameter = newdata[1].split() modelInfo[refModelName] = {} for eachline in modelParameter: if len(eachline) > 1: eachline = eachline.replace(')','') info = eachline.split('=') for eachitem in info: modelInfo[refModelName][info[0]] = info[1] f.close() return modelName, modelInfo, subcktName, paramInfo ,transInfo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n 
self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def main(args):\n if len(sys.argv) == 2:\n filename = sys.argv[1]\n else:\n print \"USAGE:\"\n print \"python NgspicetoModelica.py <filename>\"\n sys.exit()\n \n dir_name = os.path.dirname(os.path.realpath(filename))\n file_basename = os.path.basename(filename)\n \n obj_NgMoConverter = NgMoConverter()\n \n #Getting all the require information\n lines = obj_NgMoConverter.readNetlist(filename)\n #print \"Complete Lines of Ngspice netlist :lines ---------------->\",lines\n optionInfo, schematicInfo = obj_NgMoConverter.separateNetlistInfo(lines)\n #print \"All option details like analysis,subckt,.ic,.model : OptionInfo------------------->\",optionInfo\n #print \"Schematic connection info :schematicInfo\",schematicInfo\n modelName, modelInfo, subcktName, paramInfo,transInfo = obj_NgMoConverter.addModel(optionInfo)\n print \"Name of Model : modelName-------------------->\",modelName\n print \"Model Information :modelInfo--------------------->\",modelInfo\n #print \"Subcircuit Name :subcktName------------------------>\",subcktName\n #print \"Parameter Information :paramInfo---------------------->\",paramInfo\n \n \n modelicaParamInit = obj_NgMoConverter.processParam(paramInfo)\n #print \"Make modelicaParamInit from paramInfo :processParamInit------------->\",modelicaParamInit \n compInfo, plotInfo = obj_NgMoConverter.separatePlot(schematicInfo)\n print \"Info like run etc : CompInfo----------------->\",compInfo\n #print \"Plot info like plot,print etc :plotInfo\",plotInfo\n IfMOS = '0'\n \n for eachline in compInfo:\n words = eachline.split()\n if eachline[0] == 'm':\n IfMOS = '1'\n break\n subOptionInfo = []\n subSchemInfo = []\n if len(subcktName) > 0:\n #subOptionInfo = []\n #subSchemInfo = []\n for eachsub in subcktName:\n filename_temp = eachsub + '.sub'\n data = obj_NgMoConverter.readNetlist(filename_temp)\n subOptionInfo, subSchemInfo = obj_NgMoConverter.separateNetlistInfo(data)\n for eachline in subSchemInfo:\n words = eachline.split()\n if eachline[0] == 'm':\n IfMOS = '1'\n break\n #print \"Subcircuit OptionInfo : 
subOptionInfo------------------->\",subOptionInfo\n #print \"Subcircuit Schematic Info :subSchemInfo-------------------->\",subSchemInfo\n \n node, nodeDic, pinInit, pinProtectedInit = obj_NgMoConverter.nodeSeparate(compInfo, '0', [], subcktName,[])\n print \"All nodes in the netlist :node---------------->\",node\n print \"NodeDic which will be used for modelica : nodeDic------------->\",nodeDic\n #print \"PinInit-------------->\",pinInit\n #print \"pinProtectedInit----------->\",pinProtectedInit\n \n modelicaCompInit, numNodesSub = obj_NgMoConverter.compInit(compInfo,node, modelInfo, subcktName,dir_name,transInfo)\n print \"ModelicaComponents : modelicaCompInit----------->\",modelicaCompInit\n print \"SubcktNumNodes : numNodesSub---------------->\",numNodesSub\n \n connInfo = obj_NgMoConverter.connectInfo(compInfo, node, nodeDic, numNodesSub,subcktName)\n \n #print \"ConnInfo------------------>\",connInfo\n \n \n ###After Sub Ckt Func\n if len(subcktName) > 0:\n data, subOptionInfo, subSchemInfo, subModel, subModelInfo, subsubName,subParamInfo, modelicaSubCompInit, modelicaSubParam,\\\n nodeSubInterface,nodeSub, nodeDicSub, pinInitSub, connSubInfo = obj_NgMoConverter.procesSubckt(subcktName,numNodesSub,dir_name) #Adding 'numNodesSub' by Fahim\n \n #Creating Final Output file\n newfile = filename.split('.')\n newfilename = newfile[0]\n outfile = newfilename + \".mo\"\n out = open(outfile,\"w\")\n out.writelines('model ' + os.path.basename(newfilename))\n out.writelines('\\n')\n if IfMOS == '0':\n out.writelines('import Modelica.Electrical.*;')\n elif IfMOS == '1':\n out.writelines('import BondLib.Electrical.*;')\n #out.writelines('import Modelica.Electrical.*;')\n out.writelines('\\n')\n \n for eachline in modelicaParamInit:\n if len(paramInfo) == 0:\n continue\n else:\n out.writelines(eachline)\n out.writelines('\\n')\n for eachline in modelicaCompInit:\n if len(compInfo) == 0:\n continue\n else:\n out.writelines(eachline)\n out.writelines('\\n')\n \n out.writelines('protected')\n out.writelines('\\n')\n out.writelines(pinInit)\n out.writelines('\\n')\n out.writelines('equation')\n out.writelines('\\n')\n \n for eachline in connInfo:\n if len(connInfo) == 0:\n continue\n else:\n out.writelines(eachline)\n out.writelines('\\n')\n \n out.writelines('end '+ os.path.basename(newfilename) + ';')\n out.writelines('\\n')\n\n\n out.close()", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def doParametersOfInterest(self):\r\n if self.fg4fixed:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0]\")\r\n self.modelBuilder.doVar(\"r[1,0,4]\")\r\n print \"Fixing CMS_zz4l_fg4\"\r\n poi = \"r\"\r\n else:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4\"):\r\n print \"have fg4 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0.,-1,1]\")\r\n poi = \"CMS_zz4l_fg4\"\r\n if self.cPOI:\r\n if self.modelBuilder.out.var(\"cww_zz\"):\r\n print \"have czz_ww inside\"\r\n else:\r\n self.modelBuilder.doVar(\"cww_zz[0.5,-10,10]\")\r\n poi += \",cww_zz\"\r\n\r\n if self.fg2POI:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2\"):\r\n print \"have fg2 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2[0.,0,1]\")\r\n poi += \",CMS_zz4l_fg2\"\r\n if self.muFloating:\r\n self.modelBuilder.doVar(\"r[1,0,2000]\")\r\n if self.muAsPOI:\r\n print \"Treating r as a POI\"\r\n poi += \",r\"\r\n else:\r\n 
self.modelBuilder.out.var(\"r\").setAttribute(\"flatParam\")\r\n if self.phiFloating:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\"):\r\n print \"have fg4phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-3.1415926,3.1415926]\")\r\n if self.phiPOI:\r\n poi += \",CMS_zz4l_fg4phi\"\r\n else:\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\").setAttribute(\"flatParam\")\r\n if self.phi2Floating:\r\n #self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-math.pi,math.pi]\")\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\"):\r\n print \"have fg2phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2phi[0.,-3.1415926,3.1415926]\")\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\").setAttribute(\"flatParam\")\r\n \r\n self.modelBuilder.doSet(\"POI\",poi)", "def extractModelParam(self):\n copasi_filename = self.genPathCopasi(\"extractor\")\n self.recentModel = model.loada(self.antString, copasi_filename)\n return self.recentModel.parameters.copy().squeeze().to_dict()", "def giveMotevoParamFile(genome, wmlen, inter_dir, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior, bgorder, bgprior):\n\n ##UFE_models from genome_dict are not used anymore\n #UFEmodel_hg19 is UFE model for mammal species\n genome_dict = {}\n genome_dict['hg19'] = ['((((hg19:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau6:0.186713,(equCab2:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom5:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_hg19']\n genome_dict['hg18'] = ['((((hg18:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau3:0.186713,(equCab1:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom4:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFE_mammals']\n #genome_dict['dm3'] = ['((((((dm3:0.059,droSim1:0.075):0.041,(droYak2:0.104,droEre2:0.107):0.054):0.120,droAna3:0.377):0.072,dp4:0.397):0.061,droWil1:0.536):0.020,((droVir3:0.196,droMoj3:0.255):0.073,droGri2:0.291):0.337);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_dm3']\n genome_dict['dm3'] = ['((((((dm3:0.059,droSim1:0.075):0.041,(droYak2:0.104,droEre2:0.107):0.054):0.120,droAna3:0.377):0.072,dp4:0.397):0.061,droWil1:0.536):0.020,((droVir3:0.196,droMoj3:0.255):0.073,droGri2:0.291):0.337);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/dm3UFEparallel/UFEmodel_dm3']\n genome_dict['mm9'] = ['((((hg19:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau7:0.186713,(equCab2:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom5:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_mm9']\n\n\n sitefilepath = os.path.join(inter_dir, 'sites_' + tag)\n priorfilepath = os.path.join(inter_dir, 'priors_' + tag)\n loglikfile = os.path.join(inter_dir, 'loglik_' + tag)\n\n\n print '\\nCreate motevo parameter file %s' %tag\n print 'aligned', aligned\n if aligned:\n motevo_params = '\\n'.join(['refspecies %s' %genome,\n 'TREE %s' %genome_dict[genome][0],\n 'Mode TFBS',\n 'EMprior %s' %emprior,\n 'priordiff %s' %0.05,\n 'UFEwmprior %s' %200,\n 'UFEwmfile %s' %ufemodel_path,\n 'UFEwmlen %s' %wmlen,\n 'UFEprint %s' %0,\n 'markovorderBG %s' %bgorder,\n 'bgprior %s' %bgprior,\n 'bg A %s' %ATfreq,\n 'bg T %s' %ATfreq,\n 'bg G %s' %GCfreq,\n 'bg C %s' %GCfreq,\n 'restrictparses %s' 
%0,\n 'sitefile %s' %sitefilepath,\n 'priorfile %s' %priorfilepath,\n 'printsiteals %s' %0,\n 'minposterior %f' %0.0,\n 'loglikfile %s' %loglikfile])\n else:\n motevo_params = '\\n'.join(['refspecies %s' %genome,\n 'TREE (%s: 1)' %genome,\n 'Mode TFBS',\n 'EMprior %s' %emprior,\n 'priordiff %s' %0.05,\n 'markovorderBG %s' %bgorder,\n 'bgprior %s' %bgprior,\n 'bg A %s' %ATfreq,\n 'bg T %s' %ATfreq,\n 'bg G %s' %GCfreq,\n 'bg C %s' %GCfreq,\n 'restrictparses %s' %0,\n 'sitefile %s' %sitefilepath,\n 'priorfile %s' %priorfilepath,\n 'printsiteals %s' %0,\n 'minposterior %f' %0.0,\n 'loglikfile %s' %loglikfile]) \n\n params_path = os.path.join(inter_dir, 'motevo_TFBS_params_' + tag)\n pf = open(params_path, 'w')\n pf.write(motevo_params)\n return (params_path, sitefilepath, priorfilepath, loglikfile)", "def set_parameters(self):\n params = {}\n if self.modelname == 'SI':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after splot\n # Ts: Time from split to present, in 2*Na generation units\n names = ['N1', 'N2', 'Ts']\n values = [1, 1, 1]\n upper_bounds = [20, 20, 10]\n lower_bounds = [0.01, 0.01, 0]\n elif self.modelname == 'IM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to present, in 2*Na generations\n names = ['N1', 'N2', 'm21', 'm12', 'Ts']\n values = [1, 1, 1, 1, 1]\n upper_bounds = [20, 20, 20, 20, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0]\n elif self.modelname == 'AM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na generations\n names = ['N1', 'N2', 'm21', 'm12', 'Tam', 'Ts']\n values = [1, 1, 1, 1, 0.1, 1]\n upper_bounds = [20, 20, 20, 20, 2, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'SC':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n names = ['N1', 'N2', 'm21', 'm12', 'Ts', 'Tsc']\n values = [1, 1, 1, 1, 1, 0.1]\n upper_bounds = [20, 20, 20, 20, 10, 2]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'IM2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'AM2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 
'mi21', 'mi12', 'Tam', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 0.1, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 2, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'SC2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'Tsc', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 10, 2, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n params['Names'] = names\n params['Values'] = values\n params['Upper'] = upper_bounds\n params['Lower'] = lower_bounds\n return params", "def set_up_and_parameterise_model_for_experiment(self):\n self.experiment_unique_steps_to_model = {}\n for op_number, op in enumerate(self.experiment.unique_steps):\n new_model = self.model.new_copy()\n new_parameter_values = self.parameter_values.copy()\n\n if op.type != \"current\":\n # Voltage or power control\n # Create a new model where the current density is now a variable\n # To do so, we replace all instances of the current density in the\n # model with a current density variable, which is obtained from the\n # FunctionControl submodel\n # check which kind of external circuit model we need (differential\n # or algebraic)\n if op.type == \"voltage\":\n submodel_class = pybamm.external_circuit.VoltageFunctionControl\n elif op.type == \"power\":\n submodel_class = pybamm.external_circuit.PowerFunctionControl\n\n # Build the new submodel and update the model with it\n submodel = submodel_class(new_model.param, new_model.options)\n variables = new_model.variables\n submodel.variables = submodel.get_fundamental_variables()\n variables.update(submodel.variables)\n submodel.variables.update(submodel.get_coupled_variables(variables))\n variables.update(submodel.variables)\n submodel.set_rhs(variables)\n submodel.set_algebraic(variables)\n submodel.set_initial_conditions(variables)\n new_model.rhs.update(submodel.rhs)\n new_model.algebraic.update(submodel.algebraic)\n new_model.initial_conditions.update(submodel.initial_conditions)\n\n # Set the \"current function\" to be the variable defined in the submodel\n new_parameter_values[\"Current function [A]\"] = submodel.variables[\n \"Current [A]\"\n ]\n self.update_new_model_events(new_model, op)\n # Update parameter values\n self._original_temperature = new_parameter_values[\"Ambient temperature [K]\"]\n experiment_parameter_values = self.get_experiment_parameter_values(\n op, op_number\n )\n new_parameter_values.update(\n experiment_parameter_values, check_already_exists=False\n )\n parameterised_model = new_parameter_values.process_model(\n new_model, inplace=False\n )\n self.experiment_unique_steps_to_model[repr(op)] = parameterised_model\n\n # Set up rest model if experiment has start times\n if self.experiment.initial_start_time:\n new_model = self.model.new_copy()\n # Update parameter values\n new_parameter_values = self.parameter_values.copy()\n self._original_temperature = new_parameter_values[\"Ambient temperature [K]\"]\n new_parameter_values.update(\n {\"Current function [A]\": 0, \"Ambient temperature [K]\": 
\"[input]\"},\n check_already_exists=False,\n )\n parameterised_model = new_parameter_values.process_model(\n new_model, inplace=False\n )\n self.experiment_unique_steps_to_model[\n \"Rest for padding\"\n ] = parameterised_model", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def _set_model_parameters(self, verbose=False):\n from scipy.special import gamma\n\n z0 = self.z0\n\n # set parameters that are constants\n p_v, d_v, cs0, sigma, vout0 = (1, 2, 6.7, 0.1, 25.0)\n p_vB, d_vB, Mach0, p_M, d_M = (4, 2, 0.5, 1, 3)\n\n # calculate amplitudes that make the pdf integrate to 1\n A_v = np.log(10)*p_v/gamma(d_v/p_v)\n A_cs = np.log(10)/np.sqrt(2*np.pi)/sigma\n A_vB = np.log(10)*p_vB/gamma(d_vB/p_vB)\n A_M = np.log(10)*p_M/gamma(d_M/p_M)\n\n # store them in dictionaries\n self.cool_params = dict(A_v=A_v, p_v=p_v, d_v=d_v,\n A_cs=A_cs, cs0=cs0, sigma=sigma, vout0=vout0)\n self.hot_params = dict(A_vB=A_vB, p_vB=p_vB, d_vB=d_vB,\n A_M=A_M, Mach0=Mach0,p_M=p_M,d_M=d_M)\n # SN related parameters that set the reference values for loading factors\n self.params = dict(Esn=1.e51*au.erg, mstar=95.5*au.M_sun, vcool=200*au.km/au.s,\n Mej=10.*au.M_sun, ZSN=0.2, ZISM0=0.02)\n self.params['vej'] = np.sqrt(2.0*self.params['Esn']/self.params['Mej']).to('km/s')\n self.ref_params = dict(Mref=self.params['mstar'],\n pref=self.params['Esn']/(2*self.params['vcool']),\n Eref=self.params['Esn'],\n Zref=self.params['Mej']*self.params['ZSN'])\n\n # coefficients used in conversion from mass to other PDFs\n self.vp = (self.ref_params['pref']/self.params['mstar']).to('km/s').value\n self.vE = np.sqrt(self.ref_params['Eref']/self.params['mstar']).to('km/s').value\n self.Ze = (self.ref_params['Zref']/self.params['mstar']).cgs.value\n\n # parameters for scaling relations from Paper~I\n a = np.array(fit_alpha[z0])\n b = np.array(fit_beta[z0])\n\n self.scaling_params = dict(a=a, b=b)\n if z0 == '2H':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 7.5\n elif z0 == '500':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 8.5\n elif z0 == '1000':\n self.cool_params['vout0'] = 60\n self.cool_params['cs0'] = 10.0\n 
self.scaling_params['A'] = np.round(10.**(np.array(self.scaling_params['a'])),2)\n self.scaling_params['p'] = 1.+np.array(self.scaling_params['b'])\n self.enum=dict(M_cool=0, M_int=1, M_hot=2, M_total=3,\n p_cool=4, p_int=5, p_hot=6, p_total=7,\n E_cool=8, E_int=9, E_hot=10, E_total=11,\n Z_cool=12, Z_int=13, Z_hot=14, Z_total=15)\n\n # print parameters\n if verbose:\n self.show_parameters()", "def get_pars(model_info, use_demo=False):\n # Get the default values for the parameters\n pars = dict((p.name, p.default) for p in model_info['parameters'])\n\n # Fill in default values for the polydispersity parameters\n for p in model_info['parameters']:\n if p.type in ('volume', 'orientation'):\n pars[p.name+'_pd'] = 0.0\n pars[p.name+'_pd_n'] = 0\n pars[p.name+'_pd_nsigma'] = 3.0\n pars[p.name+'_pd_type'] = \"gaussian\"\n\n # Plug in values given in demo\n if use_demo:\n pars.update(model_info['demo'])\n return pars", "def _get_model(self):\n\n parameters = {keys._topology:self.topology,\n keys._size:self.size,\n keys._name:self.name,\n #keys._output_activation:self._outActiv_fun_key,\n #keys._hidden_activation:self._hiddenActiv_fun_key,\n keys._learning_rate:self.learningRate,\n keys._momentum:self.momentum}\n\n return parameters", "def get_model_config(model_name, args):\n if model_name == 'WaveGlow':\n model_config = dict(\n n_mel_channels=args.n_mel_channels,\n n_flows=args.flows,\n n_group=args.groups,\n n_early_every=args.early_every,\n n_early_size=args.early_size,\n WN_config=dict(\n n_layers=args.wn_layers,\n kernel_size=args.wn_kernel_size,\n n_channels=args.wn_channels\n )\n )\n return model_config\n elif model_name == 'FastPitch':\n model_config = dict(\n # io\n n_mel_channels=args.n_mel_channels,\n # symbols\n n_symbols=len(get_symbols(args.symbol_set)),\n padding_idx=get_pad_idx(args.symbol_set),\n symbols_embedding_dim=args.symbols_embedding_dim,\n # input FFT\n in_fft_n_layers=args.in_fft_n_layers,\n in_fft_n_heads=args.in_fft_n_heads,\n in_fft_d_head=args.in_fft_d_head,\n in_fft_conv1d_kernel_size=args.in_fft_conv1d_kernel_size,\n in_fft_conv1d_filter_size=args.in_fft_conv1d_filter_size,\n in_fft_output_size=args.in_fft_output_size,\n p_in_fft_dropout=args.p_in_fft_dropout,\n p_in_fft_dropatt=args.p_in_fft_dropatt,\n p_in_fft_dropemb=args.p_in_fft_dropemb,\n # output FFT\n out_fft_n_layers=args.out_fft_n_layers,\n out_fft_n_heads=args.out_fft_n_heads,\n out_fft_d_head=args.out_fft_d_head,\n out_fft_conv1d_kernel_size=args.out_fft_conv1d_kernel_size,\n out_fft_conv1d_filter_size=args.out_fft_conv1d_filter_size,\n out_fft_output_size=args.out_fft_output_size,\n p_out_fft_dropout=args.p_out_fft_dropout,\n p_out_fft_dropatt=args.p_out_fft_dropatt,\n p_out_fft_dropemb=args.p_out_fft_dropemb,\n # duration predictor\n dur_predictor_kernel_size=args.dur_predictor_kernel_size,\n dur_predictor_filter_size=args.dur_predictor_filter_size,\n p_dur_predictor_dropout=args.p_dur_predictor_dropout,\n dur_predictor_n_layers=args.dur_predictor_n_layers,\n # pitch predictor\n pitch_predictor_kernel_size=args.pitch_predictor_kernel_size,\n pitch_predictor_filter_size=args.pitch_predictor_filter_size,\n p_pitch_predictor_dropout=args.p_pitch_predictor_dropout,\n pitch_predictor_n_layers=args.pitch_predictor_n_layers,\n # pitch conditioning\n pitch_embedding_kernel_size=args.pitch_embedding_kernel_size,\n # speakers parameters\n n_speakers=args.n_speakers,\n speaker_emb_weight=args.speaker_emb_weight,\n # energy predictor\n energy_predictor_kernel_size=args.energy_predictor_kernel_size,\n 
energy_predictor_filter_size=args.energy_predictor_filter_size,\n p_energy_predictor_dropout=args.p_energy_predictor_dropout,\n energy_predictor_n_layers=args.energy_predictor_n_layers,\n # energy conditioning\n energy_conditioning=args.energy_conditioning,\n energy_embedding_kernel_size=args.energy_embedding_kernel_size,\n )\n return model_config\n\n else:\n raise NotImplementedError(model_name)", "def buildParamsDict(self):\n self.params_dict = {\n \"img_dir\": self.savePathJoin(\"Images\"),\n \"depth_dir\": self.savePathJoin(\"Depth\"),\n \"back_of_dir\": self.savePathJoin(\"Back_Of\"),\n \"of_dir\": self.savePathJoin(\"Of\"),\n \"save_dir\": self.user[\"Save\"],\n \"high\": self.high,\n \"low\": self.low,\n \"run_dict\": self.run_dict,\n \"of_model\": self.app.get_resource(\n os.path.join(\"of_models\", \"network-default.pytorch\")\n ),\n \"depth_model\": self.app.get_resource(\n os.path.join(\"depth_models\", \"model_city2kitti.meta\")\n ),\n \"yolo_weights\": self.app.get_resource(\n os.path.join(\"yolo\", \"yolov3.weights\")\n ),\n \"yolo_v\": self.app.get_resource(os.path.join(\"yolo\", \"yolov3.cfg\")),\n \"coco_names\": self.app.get_resource(os.path.join(\"yolo\", \"coco.names\")),\n \"object_detection_dir\": self.savePathJoin(\"ObjectDetection\"),\n \"plot_speed_dir\": PLOT_SPEED_DIR,\n \"plot_crash_dir\": PLOT_CRASH_DIR,\n \"numbers_dir\": NP_DIR,\n \"plot_error_dir\": PLOT_ERROR_DIR,\n \"speed_gt\": self.user[\"GT\"],\n \"vid_path\": self.user[\"Video\"],\n \"super_pixel_method\": self.super_pixel_method,\n \"super_pixel_dir\": SUPER_PIXEL_DIR,\n \"send_video_frame\": False,\n \"create_csv\": self.ui.c_csv.isChecked(),\n \"create_draw\": self.ui.c_draw.isChecked(),\n \"create_velocity\": self.ui.c_velocity.isChecked(),\n \"create_video_fps\": int(self.ui.t_fps.text()),\n \"optimize_params\": self.ui.c_optimize.isChecked(),\n \"super_pixel_label_dir\": os.path.join(\n self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method\n ),\n }", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rdy,Rqcd\")", "def _generate_model_metadata(out_file, model):\n # Define which FirstLevelModel attributes are BIDS compliant and which\n # should be bundled in a new \"ModelParameters\" field.\n DATA_ATTRIBUTES = [\n \"t_r\",\n ]\n PARAMETER_ATTRIBUTES = [\n \"drift_model\",\n \"hrf_model\",\n \"standardize\",\n \"high_pass\",\n \"target_shape\",\n \"signal_scaling\",\n \"drift_order\",\n \"scaling_axis\",\n \"smoothing_fwhm\",\n \"target_affine\",\n \"slice_time_ref\",\n \"fir_delays\",\n ]\n ATTRIBUTE_RENAMING = {\n \"t_r\": \"RepetitionTime\",\n }\n\n # Fields for the top level of the dictionary\n DATA_ATTRIBUTES.sort()\n data_attributes = {\n attr_name: getattr(model, attr_name)\n for attr_name in DATA_ATTRIBUTES\n if hasattr(model, attr_name)\n }\n data_attributes = {\n ATTRIBUTE_RENAMING.get(k, k): v for k, v in data_attributes.items()\n }\n\n # Fields for a nested section of the dictionary\n # The ModelParameters field is an ad-hoc way to retain useful info.\n PARAMETER_ATTRIBUTES.sort()\n model_attributes = {\n attr_name: getattr(model, attr_name)\n for attr_name in PARAMETER_ATTRIBUTES\n if hasattr(model, attr_name)\n }\n model_attributes = {\n ATTRIBUTE_RENAMING.get(k, k): v for k, v in model_attributes.items()\n }\n\n model_metadata = {\n \"Description\": \"A statistical map generated by Nilearn.\",\n **data_attributes,\n \"ModelParameters\": model_attributes,\n 
}\n\n with open(out_file, \"w\") as f_obj:\n json.dump(model_metadata, f_obj, indent=4, sort_keys=True)", "def buildVariables(self, model):\n\n \"\"\"\n #Inputs\n \"\"\"\n\n \"\"\"\n #Outputs\n \"\"\"\n #-------- Register Settings Used During Calibration --------\n #auxNDiv (to be put into synth.auxfreq.mmddenom during ir cal only)\n self._addModelVariable(model, 'ircal_auxndiv', int, ModelVariableFormat.DECIMAL, units='bytes', desc='This value is predetermined.')\n #auxLoDiv (to be put into synth.divctrl.auxlodivfreqctrl during ir cal only)\n self._addModelVariable(model, 'ircal_auxlodiv', int, ModelVariableFormat.DECIMAL, units='bytes', desc='This value is predetermined.')\n #rampVal (to be put into modem.rampctrl.rampval during ir cal only)\n self._addModelVariable(model, 'ircal_rampval', int, ModelVariableFormat.DECIMAL, units='bytes', desc='This value is predetermined.')\n #rxAmp_PLL (to be put into rac.auxctrl.rxamp during PLL loopback, ir cal only)\n self._addModelVariable(model, 'ircal_rxamppll', int, ModelVariableFormat.DECIMAL, units='bytes', desc='This value is predetermined.')\n #rxAmp_PA (to be put into rac.auxctrl.rxamp during PA loopback, ir cal only)\n self._addModelVariable(model, 'ircal_rxamppa', int, ModelVariableFormat.DECIMAL, units='bytes', desc='This value is predetermined.')\n \n #-------- Decide Between Calibration Procedures --------\n #diConfigIsValid (true = DI value / PTE value is an option)\n self._addModelVariable(model, 'ircal_manufconfigvalid', bool, ModelVariableFormat.ASCII, 'True = the manufacturing calibration value is saved on the chip')\n #pllLoopbackConfigIsValid (true = PLL loopback is an option)\n self._addModelVariable(model, 'ircal_pllconfigvalid', bool, ModelVariableFormat.ASCII, 'True = PLL loopback is permitted to generate a calibration value')\n #paLoopbackConfigIsValid (true = PA loopback is an option)\n self._addModelVariable(model, 'ircal_paconfigvalid', bool, ModelVariableFormat.ASCII, 'True = PA loopback is permitted to generate a calibration value')\n #recommendedConfig (DI/PTE vs PLL loopback vs PA loopback)\n var = self._addModelVariable(model, 'ircal_bestconfig', Enum, ModelVariableFormat.DECIMAL, 'Specify the best calibration method for this radio configuration.')\n member_data = [\n ['MANUFACTURING', 1, 'Use the calibration value saved during manufacturing, if applicable.'],\n ['PLL', 2, 'Put the part into a PLL loopback to generate a calibration value.'],\n ['PA', 3, 'Put the part into a PA loopback to generate a calibration value.'],\n ['UNSUPPORTED', 4, 'Image rejection calibration not supported.'],\n ]\n var.var_enum = CreateModelVariableEnum(\n 'configType',\n 'Specify how image rejection calibration is to run.',\n member_data)\n\n #-------- Decide Between Software/Hardware RSSI Averaging --------\n self._addModelVariable(model, 'ircal_useswrssiaveraging', bool, ModelVariableFormat.ASCII, 'True = use software RSSI averaging; False = use hardware RSSI averaging')\n self._addModelVariable(model, 'ircal_numrssitoavg', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Number of RSSI values (2^value) to average in software. 
If value = 3, 8 values will be averaged.')\n self._addModelVariable(model, 'ircal_throwawaybeforerssi', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Number of RSSI values to discard before starting to average RSSI values.')\n self._addModelVariable(model, 'ircal_delayusbeforerssi', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Microsecond delay between applying a calibration value and then reading RSSI values.')\n self._addModelVariable(model, 'ircal_delayusbetweenswrssi', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Microsecond delay between gathering RSSI values. Software RSSI averaging mode only.')\n\n #------ Determine number of raw RSSI values averaged by hardware ------\n #agcRssiPeriod\n self._addModelVariable(model, 'ircal_agcrssiperiod', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Number of raw RSSI values averaged by hardware.')\n\n #------ Registers specific to Jumbo (and new Dumbo) support ------\n self._addModelVariable(model, 'ircal_useswrssiaveraging2', bool, ModelVariableFormat.ASCII, 'True = use software RSSI averaging; False = use hardware RSSI averaging; Jumbo support')\n self._addModelVariable(model, 'ircal_numrssitoavg2', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Number of RSSI values (2^value) to average in software. If value = 3, 8 values will be averaged. Jumbo support')\n self._addModelVariable(model, 'ircal_throwawaybeforerssi2', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Number of RSSI values to discard before starting to average RSSI values. Jumbo support')\n self._addModelVariable(model, 'ircal_delayusbeforerssi2', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Microsecond delay between applying a calibration value and then reading RSSI values. Jumbo support')\n self._addModelVariable(model, 'ircal_delayusbetweenswrssi2', int, ModelVariableFormat.DECIMAL, units='bytes', desc='Microsecond delay between gathering RSSI values. Software RSSI averaging mode only. Jumbo support')\n\n #\n # Bools not allowed as advanced inputs due to GUI constraint. Using enum instead\n var = self._addModelVariable(model, 'ircal_rxtx_path_common', Enum, ModelVariableFormat.DECIMAL, 'RX and TX are on a common/shared circuit, or split. Refer to document AN971.')\n member_data = [\n ['SHARED_RX_TX_PATH' , 0, 'RX and TX circuit paths are common/shared/connected'],\n ['SPLIT_RX_TX_PATH', 1, 'RX and TX circuit paths are separated/not connected'],\n ]\n var.var_enum = CreateModelVariableEnum(\n 'IRCalRXTXPathCommonEnum',\n 'RX and TX are on a common/shared circuit, or split. Refer to document AN971.',\n member_data)\n\n self._addModelVariable(model, 'ircal_power_level', int, ModelVariableFormat.DECIMAL, units='codes', desc='Specify IR cal power level (amplitude) instead of auto (0). 
Refer to document AN971.')", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def addPppParams(model):\n \n ### GAPDP Parameters ####\n model.addParameter('GAPDP','KmSub2',0.385) # nadp\n model.addParameter('GAPDP','KmProd2',0.202) # nadph\n model.addParameter('GAPDP','kcatF',2.8)\n model.addParameter('GAPDP','kcatR',0)\n\n ### FMETTRS Parameters ###\n model.addParameter('FMETTRS','kcatF',0.45)\n\n ### MTHFC Parameters ###\n model.addParameter('MTHFC','kcatF',185)\n\n #### GHMT2 Paramters ####\n model.addParameter('GHMT2','kcatF',0.0)\n model.addParameter('GHMT2','kcatR',0.0)\n \n #### TKT1 Parameters ####\n model.addParameter('TKT1',rxnFormKey='kcatF',value=20.58)\n model.addParameter('TKT1',rxnFormKey='kcatR',value=0.8)\n \n model.addParameter('TKT1',rxnFormKey='KmSub1',value=0.743) #g3p\n model.addParameter('TKT1',rxnFormKey='KmSub2',value=3.7298) #s7p\n model.addParameter('TKT1',rxnFormKey='KmProd1',value=0.4717) #r5p\n model.addParameter('TKT1',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TKT2 Parameters ####\n model.addParameter('TKT2',rxnFormKey='kcatF',value=26.87)\n model.addParameter('TKT2',rxnFormKey='kcatR',value=1.4)\n \n model.addParameter('TKT2',rxnFormKey='KmSub1',value=0.25) #f6p\n model.addParameter('TKT2',rxnFormKey='KmSub2',value=0.743) #g3p\n model.addParameter('TKT2',rxnFormKey='KmProd1',value=0.0227) #e4p\n model.addParameter('TKT2',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TALA Parameters ####\n model.addParameter('TALA',rxnFormKey='kcatF',value=22.3)\n model.addParameter('TALA',rxnFormKey='kcatR',value=0.54)\n \n model.addParameter('TALA',rxnFormKey='KmSub1',value=0.0401) #e4p\n model.addParameter('TALA',rxnFormKey='KmSub2',value=0.6688) #f6p\n model.addParameter('TALA',rxnFormKey='KmProd1',value=1.9) #g3p\n model.addParameter('TALA',rxnFormKey='KmProd2',value=0.285) #s7p\n\n \n #### Speed up DGSN Pathway ####\n model.addParameter('DGSNK',rxnFormKey='kcatF',value=2.25)\n\n #### Speed up DADN pathway ####\n model.addParameter('PUNP2',rxnFormKey='kcatF',value=13.3)\n\n 
#### Speed up FBA rxn ####\n #model.addParameter('FBA',rxnFormKey='kcatF',value=64.5)\n\n model.addParameter('RNDR2',rxnFormKey='KmSub1',value=0.24)\n\n \n# #### RPI Parameters ####\n model.addParameter('RPI',rxnFormKey='kcatF',value=10.0)\n model.addParameter('RPI',rxnFormKey='kcatR',value=1.0)\n \n #model.addParameter('RPI',rxnFormKey='KmSub1',value=1.0)\n #model.addParameter('RPI',rxnFormKey='KmProd1',value=1.0)\n \n model.addParameter('FBA',rxnFormKey='KmSub1',value=0.12)\n model.addParameter('FBA',rxnFormKey='KmProd2',value=0.05)\n \n \n model.addParameter('GAPD',rxnFormKey='kcatF',value=442.0) \n model.addParameter('GAPD',rxnFormKey='kcatR',value=73.6) \n \n\n model.addParameter('FBA',rxnFormKey='kcatR',value=12.6)\n \n\n model.addParameter('TPI',rxnFormKey='kcatR',value=67)\n \n model.addParameter('TPI',rxnFormKey='KmSub1',value=0.077)\n model.addParameter('TPI',rxnFormKey='KmProd1',value=0.084) \n \n\n model.addParameter('FBA',rxnFormKey='kcatF',value=21.0)\n \n \n model.addParameter('PGK',rxnFormKey='kcatR',value=3.4)\n \n model.addParameter('PGM',rxnFormKey='KmSub1',value=3.6)\n model.addParameter('PGM',rxnFormKey='KmProd1',value=0.2)\n \n \n model.addParameter('PGK',rxnFormKey='KmSub1',value=0.01)\n model.addParameter('PGK',rxnFormKey='KmProd1',value=0.1)\n \n \n model.addParameter('GAPD',rxnFormKey='KmProd1',value=0.47)\n model.addParameter('GAPD',rxnFormKey='KmProd2',value=0.061)\n \n \n model.addParameter('DRPA',rxnFormKey='kcatR',value=34.0)\n \n model.addParameter('DRPA',rxnFormKey='KmProd1',value=0.267)\n model.addParameter('DRPA',rxnFormKey='KmProd2',value=0.2)\n\n \n model.addParameter('PPM2',rxnFormKey='kcatF',value=173)\n \n model.addParameter('PPM2',rxnFormKey='KmSub1',value=0.013)\n model.addParameter('PPM2',rxnFormKey='KmProd1',value=1.2)\n\n\n\n# print('Updated PPP Parameters')\n\n return", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"kappa_W[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_Z[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_tau[1,0.0,3.0]\")\n self.modelBuilder.doVar(\"kappa_mu[1,0.0,5.0]\") \n self.modelBuilder.factory_(\"expr::kappa_mu_expr(\\\"@0*@1+(1-@0)*@2\\\", CMS_use_kmu[0], kappa_mu, kappa_tau)\")\n self.modelBuilder.doVar(\"kappa_t[1,0.0,4.0]\")\n # additional kappa for the anomalous coupling\n self.modelBuilder.doVar(\"kappa_tilde_t[0.0,0.0,4.0]\")\n self.modelBuilder.doVar(\"kappa_b[1,0.0,3.0]\")\n if not self.resolved:\n self.modelBuilder.doVar(\"kappa_g[1,0.0,2.0]\")\n self.modelBuilder.doVar(\"kappa_gam[1,0.0,2.5]\")\n\tself.modelBuilder.doVar(\"BRinv[0,0,1]\")\n self.modelBuilder.out.var(\"BRinv\").setConstant(True)\n # adding additional kappa to list of parameters of interest\n pois = 'kappa_W,kappa_Z,kappa_tau,kappa_t,kappa_tilde_t,kappa_b'\n if not self.resolved:\n pois += ',kappa_g,kappa_gam'\n self.doMH()\n self.modelBuilder.doSet(\"POI\",pois)\n # use modified Higgs Builder\n self.SMH = AnomalousTopHiggsBuilder(self.modelBuilder)\n self.setup()", "def set_model_ps(filepath, dicname='PARAMETERS'):\n psmod = importlib.import_module(path_to_modline(filepath))\n model.PARAMETERS = getattr(psmod, dicname)", "def load_model(self,\n model: Union[str, io.IOBase, DM],\n name: Optional[str] = None):\n super().load_model(model, name=name)\n content = self.model[self.modelroot]\n\n self.key = content['key']\n self.id = content['id']\n self.family = content['system-family']\n self.__parameters = []\n for cp in content.aslist('calculation-parameter'):\n self.__parameters.append(dict(cp))", "def read_model(modelfile, 
dictlist):\n global dxdict\n global dxlist\n global import_img\n dxdict, dxlist = {}, [] # the list is needed for fixed ordering\n mod = io.open(modelfile, 'r')\n st = next(mod)\n ### image adress is found\n while 'SCHEME_IMAGE' not in st:\n st = next(mod)\n #image_adress = st.strip().split()[-1]\n #import_img = ImageTk.PhotoImage(Image.open(image_adress).resize((496, 384), Image.ANTIALIAS))\n #scheme.configure(image = import_img)\n ### the file must contain equations for ODE between ***STATES*** and ***END*** statements\n while \"***STATES***\" not in st:\n st = next(mod)\n #\n while \"***END***\" not in st:\n st = next(mod)\n try:\n dxdict[st.split('=')[0].strip()] = st.split('=')[1].strip().strip(';')\n dxlist.append(st.split('=')[0].strip())\n except:\n continue\n ## now, add dict names to the equations\n ## also, add state names to the PREDEFINED dict\n for s in dxdict.keys():\n for d in dictlist:\n keys = d + '.keys()'\n for k in eval(keys):\n dxdict[s] = dxdict[s].replace(k, \"%(d)s['%(k)s']\" % vars())\n ##\n for i in dxdict.keys():\n for j in dxdict.keys():\n if \"Xdict['%(j)s']\" % vars() not in dxdict[i]:\n dxdict[i] = dxdict[i].replace(j, \"Xdict['%(j)s']\" % vars())\n modelprint, nstates = os.path.basename(modelfile), len(dxlist)", "def initParms(self):\n self.parmVal = []\n self.parmName = []\n (nvect, npt) = self.data.shape\n if self.model == 0:\n self.parmVal.append(2.0)\n self.parmName.append('Order')\n if self.model == 1:\n self.parmVal.append(1.0)\n self.parmName.append('A')\n self.parmVal.append(1.0)\n self.parmName.append('B')\n if self.model == 2:\n self.parmVal.append(self.data[1][0])\n self.parmName.append('A')\n self.parmVal.append(self.data[1][npt-1])\n self.parmName.append('B')\n if self.model == 3:\n self.parmVal.append(self.data[1][0])\n self.parmName.append('Ao')\n self.parmVal.append(100.0)\n self.parmName.append('Ea')\n if self.model == 4:\n self.parmVal.append(0.001)\n self.parmName.append('A')\n self.parmVal.append(1.0)\n self.parmName.append('B')\n if self.model == 5:\n self.parmVal.append(0.001)\n self.parmName.append('A')\n self.parmVal.append(0.0)\n self.parmName.append('B')\n self.parmVal.append(1.0)\n self.parmName.append('C')\n if self.model == 6:\n self.parmVal.append(self.data[0][0])\n self.parmName.append('xo')\n self.parmVal.append(self.data[1][0])\n self.parmName.append('yo')\n yspan = getSpan(self.data[1])\n if self.data[1][0] > 0.0:\n v = self.data[1][0] + yspan/2.0\n else:\n v = self.data[1][npt-1] + yspan/2.0\n self.parmVal.append(v)\n self.parmName.append('H')\n if self.data[1][0] > self.data[1][npt-1]:\n self.parmVal.append(-1.0)\n else:\n self.parmVal.append(1.0)\n self.parmName.append('S')", "def get_model_config(model_name, args):\n if model_name == 'Tacotron2':\n model_config = dict(\n # optimization\n mask_padding=args.mask_padding,\n # audio\n n_mel_channels=args.n_mel_channels,\n # symbols\n n_symbols=args.n_symbols,\n symbols_embedding_dim=args.symbols_embedding_dim,\n # encoder\n encoder_kernel_size=args.encoder_kernel_size,\n encoder_n_convolutions=args.encoder_n_convolutions,\n encoder_embedding_dim=args.encoder_embedding_dim,\n # attention\n attention_rnn_dim=args.attention_rnn_dim,\n attention_dim=args.attention_dim,\n # attention location\n attention_location_n_filters=args.attention_location_n_filters,\n attention_location_kernel_size=args.attention_location_kernel_size,\n # decoder\n n_frames_per_step=args.n_frames_per_step,\n decoder_rnn_dim=args.decoder_rnn_dim,\n prenet_dim=args.prenet_dim,\n 
max_decoder_steps=args.max_decoder_steps,\n gate_threshold=args.gate_threshold,\n p_attention_dropout=args.p_attention_dropout,\n p_decoder_dropout=args.p_decoder_dropout,\n # postnet\n postnet_embedding_dim=args.postnet_embedding_dim,\n postnet_kernel_size=args.postnet_kernel_size,\n postnet_n_convolutions=args.postnet_n_convolutions,\n decoder_no_early_stopping=args.decoder_no_early_stopping\n )\n return model_config\n elif model_name == 'WaveGlow':\n model_config = dict(\n n_mel_channels=args.n_mel_channels,\n n_flows=args.flows,\n n_group=args.groups,\n n_early_every=args.early_every,\n n_early_size=args.early_size,\n WN_config=dict(\n n_layers=args.wn_layers,\n kernel_size=args.wn_kernel_size,\n n_channels=args.wn_channels\n )\n )\n return model_config\n else:\n raise NotImplementedError(model_name)", "def model_4_parameters(num_features, num_classes, image_info):\n parameters = {}\n parameters['num_features'] = num_features\n parameters['num_classes'] = num_classes\n if image_info['key'][:5] == \"pavia\":\n parameters['C'] = 1.0\n else:\n parameters['C'] = 40.0\n \n return parameters" ]
[ "0.6502187", "0.64847857", "0.6467319", "0.642566", "0.63406587", "0.6313643", "0.62799996", "0.61906505", "0.6187379", "0.61486065", "0.6145462", "0.6080124", "0.6062548", "0.60373634", "0.59718966", "0.5930781", "0.59303004", "0.5926543", "0.5897012", "0.58648956", "0.58574134", "0.58505493", "0.5848819", "0.57972753", "0.5773693", "0.5760757", "0.57434", "0.5731757", "0.572664", "0.5724693" ]
0.7351766
0
separate print, plot and component statements
def separatePlot(self,schematicInfo): compInfo = [] plotInfo = [] for eachline in schematicInfo: words = eachline.split() if words[0] == 'run': continue elif words[0] == 'plot' or words[0] == 'print': plotInfo.append(eachline) else: compInfo.append(eachline) return compInfo, plotInfo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display(self):\n for x, p in zip(self.xs, self.ps):\n print(x, p)", "def render(self, mode='human', type_of='color'):\n if type_of == 'color':\n plot_color(self.observation, self.servers_mem ,self.services_mem)\n elif type_of=='black':\n plot_black(self.observation, self.servers_mem ,self.services_mem)\n print('\\n----services mem----\\n')\n print('services resource usage: {}\\n'.format(self.services_mem))\n print('servers capacity: {}\\n'.format(self.servers_mem))\n print('\\n----observation----\\n')\n print('service placements: {}\\n'.format(self.observation))", "def printfunc(self):\n zero1=self.Newton(True)\n print \"Using initial porition %0.2f ,%0.2f\" %(self.x_init,self.y_0)\n print \"extremum calculated witn Newton-Rapson: %0.2f ,%0.2f.\"%(zero1[0],zero1[1])\n zero2=self.Newton(False)\n print \"extremum calculated witn Secant: %0.2f ,%0.2f.\" %(zero2[0],zero2[1])\n xlist=np.arange(self.x_0-10,self.x_0+10,0.01)\n ylist=np.arange(self.y_0-10,self.y_0+10,0.01)\n X,Y=np.meshgrid(xlist,ylist)\n Z=self.sfunc(X,Y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n \n ax.plot(xlist, ylist, self.sfunc(xlist,ylist), 'g-',label='function $e^{(-(x-%0.2f)^2-(y-%0.2f)^2)}$' %(self.x_0,self.y_0))\n ax.contour(X, Y, Z)# colors = 'k', linestyles = 'solid')\n ax.plot([zero1[0]], [zero1[0]], self.sfunc(zero1[0],zero1[1]),'bo',label='extrema using Newton-Rapson (%0.2f; %0.2f)'%(zero1[0],zero1[1]))\n ax.plot([zero2[0]], [zero2[0]], self.sfunc(zero2[0],zero2[1]),'ro',label='extrema using Seacent (%0.2f; %0.2f)'%(zero2[0],zero2[1]))\n ax.legend()\n plt.show()", "def display(self):\n print(\"{}, {}\".format(self.label, self.params))", "def _debugPlotExamplesOfAll():\n\tbpData(plot=True)\n\tcapBankData(plot=True)\n\tcos1RogowskiData(plot=True)\n\tquartzJumperData(plot=True)\n\tfbData(plot=True)\n\tipData(plot=True)\n\tloopVoltageData(plot=True)\n\tmModeData(plot=True)\n\tnModeData(plot=True)\n\tpaData(plot=True)\n\tplasmaRadiusData(plot=True)\n\tqStarData(plot=True)\n\tsolData(plot=True)\n\tspectrometerData(plot=True)\n\ttaData(plot=True)\n\ttfData(plot=True)\n\ttpData(plot=True)\n\tsxrData(plot=True)", "def setDisplay(self):\n self.graph_display=[self.complexCompose(self.coefficients,(t+1)/self.display_number)[-1] for t in range(self.display_number)]", "def print(self):\r\n self.print_avec_separateur()", "def my_print(self):\n length = self.__size\n\n if self.__size == 0:\n print(\"\")\n\n \"\"\"Print using position of y-axis.\"\"\"\n for i in range(self.__position[1]):\n print(\"\")\n for j in range(length):\n \"\"\"Print spaces and # in x-axis.\"\"\"\n print((\" \" * self.__position[0]) + (\"#\" * length))", "def plot(self):\n\t\tself.plotOfSpect()", "def render(self):\n map = {0:'.', 1:'x', 2:'o'} # grid label vs how to plot\n print(''.join(map[i] for i in self.grid[0:3]))\n print(''.join(map[i] for i in self.grid[3:6]))\n print(''.join(map[i] for i in self.grid[6:9]))\n print('====')", "def render(self):\n map = {0:'.', 1:'x', 2:'o'} # grid label vs how to plot\n print(''.join(map[i] for i in self.grid[0:3]))\n print(''.join(map[i] for i in self.grid[3:6]))\n print(''.join(map[i] for i in self.grid[6:9]))\n print('====')", "def display(self):\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(\" \", end=\"\")\n for row in range(self.width):\n print(\"#\", end=\"\")\n print()", "def __repr__ (self) :\n print (self.Win)\n print (self.Whid)\n 
print (self.Wout)\n plt.plot (range (len (self.xvec)), self.xvec [:])\n plt.show ()\n return \"\"", "def plot(self):\n pass", "def vis_rep(corex, data=None, row_label=None, column_label=None, prefix='corex_output', focus='', topk=5):\n if column_label is None:\n column_label = list(map(str, range(data.shape[1])))\n if row_label is None:\n row_label = list(map(str, range(corex.n_samples)))\n\n alpha = corex.alpha[:, :, 0]\n\n print('Groups in sorted_groups.txt')\n output_groups(corex.tcs, alpha, corex.mis, column_label, prefix=prefix)\n output_labels(corex.labels, row_label, prefix=prefix)\n output_strong(corex.tcs, alpha, corex.mis, corex.labels, prefix=prefix)\n anomalies(corex.log_z, row_label=row_label, prefix=prefix)\n plot_convergence(corex.tc_history, prefix=prefix)\n\n if data is not None:\n print('Pairwise plots among high TC variables in \"relationships\"')\n data_to_plot = np.where(data == corex.missing_values, np.nan, data)\n log_p_y_given_x = calculate_log_latent(corex, data)\n cont = cont3(log_p_y_given_x)\n output_cont_labels(log_p_y_given_x, row_label, prefix=prefix)\n plot_heatmaps(data_to_plot, corex.labels, alpha, corex.mis, column_label, cont, prefix=prefix, focus=focus)\n plot_pairplots(data_to_plot, corex.labels, alpha, corex.mis, column_label, prefix=prefix, focus=focus, topk=topk)\n plot_top_relationships(data_to_plot, corex.labels, alpha, corex.mis, column_label, cont, prefix=prefix, topk=topk)\n # plot_top_relationships(data_to_plot, corex.labels, alpha, mis, column_label, corex.log_z[:,:,0].T, prefix=prefix+'anomaly_')", "def plot(self):\n\t\tself.plotOfLoopVoltage()", "def _showdata(self, prec=4):\n print('nh {0:d} nslices {1:d} nbl {2:d} ncp {3:d} nca {4:d} '.format(\n self.nh, self.nslices, self.nbl, self.ncp, self.nca), end=\"\")\n print(\"observables in np arrays with {:d} rows\".format(self.nslices))\n\n if len(self.observables) == 4:\n print('nca', self.nca)\n else:\n print()\n np.set_printoptions(precision=prec)\n\n print(self.fp.shape, \"fp (degrees, but stored internally in radians):\\n\",\n self.fp*self.degree, \"\\n\")\n print(self.fa.shape, \"fa:\\n\", self.fa, \"\\n\")\n\n print(self.cp.shape, \"cp (degrees, but stored internally in radians):\\n\",\n self.cp*self.degree, \"\\n\")\n if len(self.observables) == 4:\n print(self.ca.shape, \"ca:\\n\", self.ca, \"\\n\")\n # print(self.info4oif_dict)\n\n print(\"hole centers array shape:\", self.ctrs.shape)\n\n print(len(self.bholes), \"baseline hole indices\\n\", self.bholes)\n print(self.bls.shape, \"baselines:\\n\", self.bls)\n\n print(self.tholes.shape, \"triple hole indices:\\n\", self.tholes)\n print(self.tuv.shape, \"triple uv vectors:\\n\", self.tuv)\n\n print(self.qholes.shape, \"quad hole indices:\\n\", self.qholes)\n print(self.quvw.shape, \"quad uvw vectors:\\n\", self.quvw)", "def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()", "def render(self, mode='human', type_of='color'):\n if type_of == 'color':\n plot_color(self.observation[0:self.num_of_services],\n self.servers_mem ,self.services_mem)\n elif type_of=='black':\n plot_black(self.observation[0:self.num_of_services],\n self.servers_mem ,self.services_mem)\n print('\\n----services mem----\\n')\n print('services resource usage: {}'.format(self.services_mem))\n print('servers capacity: {}'.format(self.servers_mem))\n print('users services: {}'.format(self.users_services))\n 
print('\\n----observation----\\n')\n print('services servers: {}'.format(self.observation[0:self.num_of_services]))\n print('users stations: {}'.format(self.observation[self.num_of_services:]))", "def print_methods():\n print('''1. Sobol Variance Based:\n first and total order''')\n print('''2. Regional Sensitivity Analysis:\n also called Monte Carlo Filtering''')\n print('''3. Morris Screening Method:\n with pre-optimized defined trajects and group option''')\n print('''4. Sampled-OAT:\n Latin HYpercube or Sobol sampling with OAT sensitivity''')\n print('''5. Standardized Regression Coefficients:\n Latin HYpercube or Sobol sampling with linear regression''')\n print('''6. DYNamic Identifiability Analysis:\n Latin HYpercube or Sobol sampling with time-sliced based\n evaluation''')", "def plot():\n pass", "def status(self):\n\n for index, x in enumerate(self.lot):\n print('|', end='')\n for spot, value in enumerate(x):\n if value == 1:\n print(\"|\", end='')\n if value == 2:\n print(\" |\", end='')\n if value == 3:\n print(\" |\", end='')\n if value == -1:\n print(\"X|\", end='')\n if value == -2:\n print(\"XXX|\", end='')\n if value == -3:\n print(\"XXXXX|\", end='')\n print()", "def show(self):\n import Helpers\n for p in self.parts:\n color = (p[1][0]*255, p[1][1]*255, p[1][2]*255, 0)\n Helpers.show(p[0], color)", "def printDot(system,out):\n print_header(out)\n #printVariablesDot(system,out)\n printRulesDot(system,out)\n print_footer(out)", "def display4(*args):\n #-------------------- unpack\n twiss_func = args[0]\n cos_like = args[1]\n sin_like = args[2]\n lat_plot = args[3]\n #-------------------- beta x,y & dispersion x\n s = [twiss_func(i,'s') for i in range(twiss_func.nbpoints)] # Abszisse\n bx = [twiss_func(i,'bx') for i in range(twiss_func.nbpoints)] # beta x\n by = [twiss_func(i,'by') for i in range(twiss_func.nbpoints)] # beta y\n dx = [twiss_func(i,'dx') for i in range(twiss_func.nbpoints)] # dispersion x\n#-------------------- longitudinal trajectories\n z1= [cos_like(i,'s') for i in range(cos_like.nbpoints)]\n cz= [cos_like(i,'cz') for i in range(cos_like.nbpoints)]\n cdp= [cos_like(i,'cdp') for i in range(cos_like.nbpoints)]\n\n z2= [sin_like(i,'s') for i in range(sin_like.nbpoints)]\n sz= [sin_like(i,'sz') for i in range(sin_like.nbpoints)]\n sdp= [sin_like(i,'sdp') for i in range(sin_like.nbpoints)]\n #-------------------- lattice viseo\n vzero = [0. 
for i in range(lat_plot.nbpoints)] # zero line\n vis_abszisse = [lat_plot(i,'s') for i in range(lat_plot.nbpoints)]\n vis_ordinate = [lat_plot(i,'viseo') for i in range(lat_plot.nbpoints)]\n #-------------------- figure frame\n width=14; height=7.6\n # fighdr = 'lattice version = {}, input file = {}'.format(PARAMS['lattice_version'],PARAMS['input_file'])\n fig = plt.figure(num=1,figsize=(width,height),facecolor='#eaecef',tight_layout=False)\n\n #-------------------- beta functions\n splot211=plt.subplot(211)\n splot211.set_title('beta x,y')\n # mapping box\n splot211.text(0.01, 1.1, UTIL.FLAGS.get('mapping'),transform=splot211.transAxes,fontsize=8,bbox=dict(boxstyle='round',facecolor='wheat',alpha=0.5),verticalalignment='top')\n # function plots\n plt.plot(s,bx, label=r\"$\\beta$x [m]\", color='black', linestyle='-')\n plt.plot(s,by, label=r\"$\\beta$y [m]\", color='red', linestyle='-')\n plt.plot(s,dx, label=r'$\\eta_x$ [m]' , color='green', linestyle='-') # dispersion x\n vscale=splot211.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n plt.plot(vis_abszisse,viseoz,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='green',linestyle='--')\n # zero line\n splot211.plot(vis_abszisse,vzero,color='green',linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n\n #-------------------- longitudinal tracks z, dP/P\n # ax_l = left abszisse\n ax_l=plt.subplot(212)\n # ax_l=plt.subplot(10,1,(7,9))\n ax_l.set_title('synchrotron oscillation')\n ax_l.set_ylabel(r\"z [mm]\")\n ax_l.tick_params(axis='y', colors='green')\n ax_l.yaxis.label.set_color('green')\n ax_l.plot(z1,cz,label='C',color='green')\n ax_l.plot(z2,sz,label='S',color='green',linestyle=':')\n plt.legend(loc='lower left',fontsize='x-small')\n # ax_r = right abszisse\n ax_r = ax_l.twinx()\n ax_r.set_ylabel(r'$\\Delta$p/p [%]')\n ax_r.tick_params(axis='y', colors='red')\n ax_r.yaxis.label.set_color('red')\n ax_r.plot(z2,cdp,label='C',color='red')\n ax_r.plot(z2,sdp,label='S',color='red',linestyle=':')\n ax_r.plot(vis_abszisse,vzero,color='red', linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n # lattice elements\n vscale=ax_l.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n ax_l.plot(vis_abszisse,viseoz,label='',color='black')\n ax_l.plot(vis_abszisse,vzero,color='green',linestyle='--')", "def print_models(G_XtoY, G_YtoX, D_X, D_Y):\n print(\" G_XtoY \")\n print(\"-----------------------------------------------\")\n print(G_XtoY)\n print()\n\n print(\" G_YtoX \")\n print(\"-----------------------------------------------\")\n print(G_YtoX)\n print()\n\n print(\" D_X \")\n print(\"-----------------------------------------------\")\n print(D_X)\n print()\n\n print(\" D_Y \")\n print(\"-----------------------------------------------\")\n print(D_Y)\n print()", "def showPlot1(): \n raise NotImplementedError", "def showPlot1():\n\n interested_in = list(range(5,30,5))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(1, 1.0, item, item, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on room size')\n xlabel('area of the room (tiles)')\n ylabel('mean time (clocks)')\n show()", "def _print(self):\n print('center :', self.center, ' widht : ', self.width, ' height : ', self.height, ' heat : ', self.heat,\n ' speed ', self.speed)", "def print(self):\n # it would be nice 
just to add one point instead of printing all again from scratch\n stones_player_0 = [(i, j) for i in range(self.size) for j in range(self.size) if self.board[i, j] == -1]\n stones_player_1 = [(i, j) for i in range(self.size) for j in range(self.size) if self.board[i, j] == 1]\n plt.plot([0, self.size-1, 0, self.size-1], [0, 0, self.size-1, self.size-1], marker='x', ls='')\n plt.plot(*zip(*stones_player_0), marker='o', color='r', ls='')\n plt.plot(*zip(*stones_player_1), marker='o', color='b', ls='')\n\n plt.draw()\n plt.show(block=False)" ]
[ "0.61761427", "0.6136814", "0.6121266", "0.61151016", "0.6107716", "0.6094556", "0.60828936", "0.60395825", "0.6032903", "0.6008571", "0.6008571", "0.5999056", "0.5972926", "0.5953528", "0.5943987", "0.59269154", "0.58774036", "0.58771664", "0.58751196", "0.58711714", "0.5861035", "0.5851666", "0.5828691", "0.58208245", "0.5794732", "0.57931536", "0.5789201", "0.578656", "0.57654285", "0.57473415" ]
0.63181067
0
Take subcircuit name and give the info related to parameters in the first line and initialise it in
def getSubParamLine(self,subname, numNodesSub, subParamInfo,dir_name): #nodeSubInterface = [] subOptionInfo_p = [] subSchemInfo_p = [] filename_t = subname + '.sub' filename_t = os.path.join(dir_name, filename_t) data_p = self.readNetlist(filename_t) subOptionInfo_p, subSchemInfo_p = self.separateNetlistInfo(data_p) if len(subOptionInfo_p) > 0: newline = subOptionInfo_p[0] newline = newline.split('.subckt '+ subname) intLine = newline[1].split() print "numNodesSub Index---------->",numNodesSub newindex = numNodesSub[subname] appen_line = intLine[newindex:len(intLine)] appen_param = ','.join(appen_line) paramLine = 'parameter Real ' + appen_param + ';' paramLine = paramLine.translate(maketrans('{}', ' ')) subParamInfo.append(paramLine) return subParamInfo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def append_subcircuit(self, lines: Tuple[int, int], circuit: Circuit, content: str) -> None:", "def __init__(self,\n name,\n vertices_location,\n connectivity,\n connectivity_ids=None,\n connectivity_label=None,\n connectivity_label_metadata=None,\n connectivity_colormap = None,\n connector_size=2.6,\n global_deselect_alpha=0.2,\n global_select_alpha=1.0,\n skeleton_linewidth=2.0):\n super(Microcircuit, self).__init__( name )\n\n if not connectivity_ids is None:\n self.connectivity_ids = connectivity_ids\n\n if not connectivity_label is None:\n self.connectivity_labels = connectivity_label\n\n if not connectivity_label_metadata is None:\n\n for semanticdict in connectivity_label_metadata:\n # name needs to be based on convention, TODO: best from ontology id rather than string!\n # TODO: use microcircuit convention\n if semanticdict.has_key(\"name\"):\n name = semanticdict[\"name\"]\n if \"skeleton\" in name:\n self.con_skeleton = int(semanticdict[\"value\"])\n elif \"presynaptic\" in name:\n self.con_pre = int(semanticdict[\"value\"])\n elif \"postsynaptic\" in name:\n self.con_post = int(semanticdict[\"value\"])\n\n else:\n # TODO: default\n self.con_skeleton = 1\n self.con_pre = 2\n self.con_post = 3\n\n # selection stores integer ids from connectivity_selectionID\n # when selected\n self.skeleton_selection = []\n\n # use the connectivity labels to extract the connectivity for the skeletons\n self.index_skeleton = np.where(self.connectivity_labels == self.con_skeleton)[0]\n self.index_allpre = np.where(self.connectivity_labels == self.con_pre)[0]\n self.index_allpost = np.where(self.connectivity_labels == self.con_post)[0]\n \n self.vertices = vertices_location\n self.connectivity = connectivity\n\n connectivity_skeleton = self.connectivity[self.index_skeleton]\n self.vertices_skeleton = self.vertices[ connectivity_skeleton.ravel() ]\n \n # we have a simplified connectivity now\n self.connectivity_skeleton = np.array( range(len(self.vertices_skeleton)), dtype = np.uint32 )\n self.connectivity_skeleton = self.connectivity_skeleton.reshape( (len(self.connectivity_skeleton)/2, 2) )\n self.connectivity_ids_skeleton = self.connectivity_ids[ self.index_skeleton ]\n\n # look up the start and end vertex id\n # map these to _skeleton arrays, and further to actor???\n\n # colors for skeletons\n if isinstance(connectivity_colormap, dict) and connectivity_colormap.has_key( self.con_skeleton ):\n self.connectivity_skeleton_colors = np.repeat(connectivity_colormap[self.con_skeleton], len(self.connectivity_skeleton), axis=0).astype( np.float32 )\n\n ##########\n # Incoming connectors\n ##########\n\n # extract the pre connectivity and create cones\n # store the indices for to be used to create the vector scatter\n # by itself, it represent implicitly the index used to select/deselect the vectors\n if len(self.index_allpre) == 0:\n if DEBUG:\n print \"no presynaptic connection\"\n self.pre_actor = None\n else:\n self.vertices_pre = self.vertices[ connectivity[self.index_allpre].ravel() ]\n self.pre_p1 = self.vertices_pre[::2, :] # data is NOT copied here\n self.pre_p2 = self.vertices_pre[1::2, :]\n pren = len(self.index_allpre)\n r1 = np.ones( pren, dtype = np.float32 ) * connector_size\n r2 = np.zeros( pren, dtype = np.float32 )\n if isinstance(connectivity_colormap, dict) and connectivity_colormap.has_key( self.con_pre ):\n preval = np.ones( pren, dtype = np.dtype(type(self.con_pre)) ) * self.con_pre\n else:\n preval = None\n self.pre_actor = VectorScatter( \"PreConnector\", self.pre_p1, 
self.pre_p2, r1, r2, values = preval,\n resolution = 8, colormap = connectivity_colormap )\n # len(self.index_pre) = len(self.pre_p1) = len(preval)\n\n ##########\n # Outgoing connectors\n ##########\n\n # extract the post connectivity and create cones\n if len(self.index_allpost) == 0:\n if DEBUG:\n print \"no postsynaptic connection\"\n self.post_actor = None\n else:\n self.vertices_post = self.vertices[ connectivity[self.index_allpost].ravel() ]\n self.post_p1 = self.vertices_post[::2, :]\n self.post_p2 = self.vertices_post[1::2, :]\n postn = len(self.index_allpost)\n r1 = np.zeros( postn, dtype = np.float32 )\n r2 = np.ones( postn, dtype = np.float32 ) * connector_size\n if isinstance(connectivity_colormap, dict) and connectivity_colormap.has_key( self.con_post ):\n postval = np.ones( postn, dtype = np.dtype(type(self.con_post)) ) * self.con_post\n else:\n postval = None\n self.post_actor = VectorScatter( \"PostConnector\", self.post_p1, self.post_p2, r1, r2, values = postval,\n resolution = 8, colormap = connectivity_colormap )\n\n ##########\n # Skeletons\n ##########\n self.skeleton = Skeleton( name = \"Polygon Lines\",\n vertices = self.vertices_skeleton,\n connectivity = self.connectivity_skeleton,\n connectivity_colors = self.connectivity_skeleton_colors,\n connectivity_ID = self.connectivity_ids_skeleton,\n linewidth = skeleton_linewidth,\n global_deselect_alpha = global_deselect_alpha,\n global_select_alpha = global_select_alpha )\n\n self.connectivity_skeletononly_ids = None\n self.connectivity_preonly_ids = None\n self.connectivity_postonly_ids = None\n\n self.global_deselect_alpha = global_deselect_alpha\n self.global_select_alpha = global_select_alpha", "def connectInfo(self,compInfo, node, nodeDic, numNodesSub,subcktName):\n connInfo = []\n print \"compinfo-------->\",compInfo\n sourcesInfo = self.separateSource(compInfo)\n for eachline in compInfo:\n words = eachline.split()\n print \"eachline----->\",eachline\n print \"eachline[0]------->\",eachline[0]\n if eachline[0]=='r' or eachline[0]=='R' or eachline[0]=='c' or eachline[0]=='C' or eachline[0]=='d' or eachline[0]=='D' \\\n or eachline[0]=='l' or eachline[0]=='L' or eachline[0]=='v' or eachline[0]=='V':\n conn = 'connect(' + words[0] + '.p,' + nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n,' + nodeDic[words[2]] + ');'\n connInfo.append(conn)\n elif eachline[0]=='q' or eachline[0]=='Q':\n print \"Inside Transistor--->\"\n print \"Node Dict------>\",nodeDic\n conn = 'connect(' + words[0] + '.C,' + nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.B,' + nodeDic[words[2]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.E,' + nodeDic[words[3]] + ');'\n connInfo.append(conn)\n elif eachline[0]=='m' or eachline[0]=='M':\n conn = 'connect(' + words[0] + '.D,' + nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.G,' + nodeDic[words[2]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.S,' + nodeDic[words[3]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.B,' + nodeDic[words[4]] + ');'\n connInfo.append(conn)\n elif eachline[0] in ['f','h','F','H']:\n vsource = words[3]\n sourceNodes = sourcesInfo[vsource]\n sourceNodes = sourceNodes.split()\n conn = 'connect(' + words[0] + '.p1,'+ nodeDic[sourceNodes[0]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n1,'+ nodeDic[sourceNodes[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.p2,'+ 
nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n2,'+ nodeDic[words[2]] + ');'\n connInfo.append(conn)\n elif eachline[0] in ['g','e','G','E']:\n conn = 'connect(' + words[0] + '.p1,'+ nodeDic[words[3]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n1,'+ nodeDic[words[4]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.p2,'+ nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n2,'+ nodeDic[words[2]] + ');'\n connInfo.append(conn)\n elif eachline[0] in ['g','e','G','E']:\n conn = 'connect(' + words[0] + '.p1,'+ nodeDic[words[3]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n1,'+ nodeDic[words[4]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.p2,'+ nodeDic[words[1]] + ');'\n connInfo.append(conn)\n conn = 'connect(' + words[0] + '.n2,'+ nodeDic[words[2]] + ');'\n connInfo.append(conn)\n elif eachline[0]=='x' or eachline[0]=='X':\n templine = eachline.split()\n temp = templine[0].split('x')\n index = temp[1]\n for i in range(0,len(templine),1):\n if templine[i] in subcktName: #Ask Manas Added subcktName in function Call\n subname = templine[i]\n nodeNumInfo = self.getSubInterface(subname, numNodesSub)\n for i in range(0, numNodesSub[subname], 1):\n #conn = 'connect(' + subname + '_instance' + index + '.' + nodeDic[nodeNumInfo[i]] + ',' + nodeDic[words[i+1]] + ');'\n conn = 'connect(' + subname + '_instance' + index + '.' + 'n'+ nodeNumInfo[i] + ',' + nodeDic[words[i+1]] + ');'\n connInfo.append(conn)\n else:\n continue\n if '0' or 'gnd' in node:\n conn = 'connect(g.p,n0);'\n connInfo.append(conn)\n \n return connInfo", "def test_construct_subcircuit(self):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n def circuit(a, b, c):\r\n qml.RX(a, wires=0)\r\n qml.RY(b, wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n qml.PhaseShift(c, wires=1)\r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1))\r\n\r\n circuit = qml.QNode(circuit, dev)\r\n tapes = circuit.metric_tensor(1.0, 1.0, 1.0, only_construct=True)\r\n assert len(tapes) == 3\r\n\r\n # first parameter subcircuit\r\n assert len(tapes[0].operations) == 1\r\n assert isinstance(tapes[0].operations[0], qml.Hadamard) # PauliX decomp\r\n\r\n # second parameter subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RX)\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # third parameter subcircuit\r\n assert len(tapes[2].operations) == 4\r\n assert isinstance(tapes[2].operations[0], qml.RX)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert isinstance(tapes[2].operations[2], qml.CNOT)\r\n # Phase shift generator\r\n assert isinstance(tapes[2].operations[3], qml.QubitUnitary)", "def get_circuit(self, params):\n raise NotImplementedError", "def __init__(self, n_qubit:int, copies:int=1,\n rotation_blocks:Optional[Union[str, cirq.Gate, Callable, 'TemplateCircuitBlock',\n List[str],List[cirq.Gate],List[Callable],\n List['TemplateCircuitBlock']]] =None,\n entanglement_blocks:Optional[Union[str, cirq.Gate, Callable, 'TemplateCircuitBlock',\n List[str],List[cirq.Gate],List[Callable],\n List['TemplateCircuitBlock']]] =None,\n entangle_strategy:Optional[Union[str,List[str], Callable[[int,int],List[Tuple[int]]],\n List[Callable[[int,int],List[Tuple[int]]]]]]=None,\n parameter_symbol:str='θ',\n 
final_rotation_layer:bool=False,\n flatten_circuit:bool=False,\n reuse_param_per_depth:bool=False,\n reuse_param_per_layer:bool=False,\n reuse_param_per_template:bool=False,\n parameter_index:Optional[int]=None,\n parameter_scale=1,\n name:str='ParameterisedCircuit',\n *args, **kwargs):\n super().__init__(n_qubit, name=name, *args, **kwargs)\n self._parameter_symbol = parameter_symbol\n self._parameters = np.array([], dtype=object)\n self._readout_qubit = None\n self._flatten_circuit = flatten_circuit\n self._entangle_strategy = entangle_strategy if entangle_strategy else 'full'\n self._parameter_index = parameter_index\n self._reuse_param_per_depth = reuse_param_per_depth\n self._reuse_param_per_layer = reuse_param_per_layer\n self._reuse_param_per_template = reuse_param_per_template\n self._parameter_scale = parameter_scale \n self.build(rotation_blocks, entanglement_blocks, entangle_strategy, copies,\n final_rotation_layer)", "def test_construct_subcircuit_layers(self):\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n def circuit(params):\r\n # section 1\r\n qml.RX(params[0], wires=0)\r\n # section 2\r\n qml.RY(params[1], wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n # section 3\r\n qml.RX(params[2], wires=0)\r\n qml.RY(params[3], wires=1)\r\n qml.RZ(params[4], wires=2)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n # section 4\r\n qml.RX(params[5], wires=0)\r\n qml.RY(params[6], wires=1)\r\n qml.RZ(params[7], wires=2)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)), qml.expval(qml.PauliX(2))\r\n\r\n circuit = qml.QNode(circuit, dev)\r\n\r\n params = np.ones([8])\r\n tapes = circuit.metric_tensor(params, only_construct=True)\r\n\r\n # this circuit should split into 4 independent\r\n # sections or layers when constructing subcircuits\r\n assert len(tapes) == 4\r\n\r\n # first layer subcircuit\r\n assert len(tapes[0].operations) == 1\r\n assert isinstance(tapes[0].operations[0], qml.Hadamard) # PauliX decomp\r\n\r\n # second layer subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RX)\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # # third layer subcircuit\r\n assert len(tapes[2].operations) == 8\r\n assert isinstance(tapes[2].operations[0], qml.RX)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert isinstance(tapes[2].operations[2], qml.CNOT)\r\n assert isinstance(tapes[2].operations[3], qml.CNOT)\r\n # PauliX decomp\r\n assert isinstance(tapes[2].operations[4], qml.Hadamard)\r\n # PauliY decomp\r\n assert isinstance(tapes[2].operations[5], qml.PauliZ)\r\n assert isinstance(tapes[2].operations[6], qml.S)\r\n assert isinstance(tapes[2].operations[7], qml.Hadamard)\r\n\r\n # # fourth layer subcircuit\r\n assert len(tapes[3].operations) == 13\r\n assert isinstance(tapes[3].operations[0], qml.RX)\r\n assert isinstance(tapes[3].operations[1], qml.RY)\r\n assert isinstance(tapes[3].operations[2], qml.CNOT)\r\n assert isinstance(tapes[3].operations[3], qml.CNOT)\r\n assert isinstance(tapes[3].operations[4], qml.RX)\r\n assert isinstance(tapes[3].operations[5], qml.RY)\r\n assert isinstance(tapes[3].operations[6], qml.RZ)\r\n assert isinstance(tapes[3].operations[7], qml.CNOT)\r\n assert isinstance(tapes[3].operations[8], qml.CNOT)\r\n # PauliX decomp\r\n assert 
isinstance(tapes[3].operations[9], qml.Hadamard)\r\n # PauliY decomp\r\n assert isinstance(tapes[3].operations[10], qml.PauliZ)\r\n assert isinstance(tapes[3].operations[11], qml.S)\r\n assert isinstance(tapes[3].operations[12], qml.Hadamard)", "def getSubInterface(self,subname,numNodesSub):\n subOptionInfo_p = []\n subSchemInfo_p = []\n filename_t = subname + '.sub'\n data_p = self.readNetlist(filename_t)\n subOptionInfo_p, subSchemInfo_p = self.separateNetlistInfo(data_p)\n if len(subOptionInfo_p) > 0:\n newline = subOptionInfo_p[0]\n newline = newline.split('.subckt '+ subname) \n intLine = newline[1].split()\n newindex = numNodesSub[subname]\n nodesInfoLine = intLine[0:newindex]\n return nodesInfoLine", "def __init__(self, workdir, encut, struct_path, name=\"relax_bwmn\"): \n potcar_path = \"../pseudos/BWO_Mn_POTCAR\" \n kgrid = [2, 2, 2] \n input_param = DefaultOptimizationParameters(encut) \n relax_calc = SCFCalculation(workdir, pseudo_par=None, kgrid=kgrid, name=\"BWO_Mn_relax\", encut=encut, input_parameters=input_param) \n relax_calc.make_calculation(struct_path, potcar_path=potcar_path)", "def __init__(self, name, config):\n self.name = name\n self.config = config\n LineBuilder.__init__(self, name, config)\n \n d0eeg_name = '{0}D0EEG'.format(name)\n dst2pid0eeg_name = '{0}Dst2PiD0EEG'.format(name)\n \n d0ee_name = '{0}D0EE'.format(name)\n dst2pid0ee_name = '{0}Dst2PiD0EE'.format(name)\n\n self.selD0EEG = makeEEGResonance(\n d0eeg_name, \n config,\n inputSel=[StdLooseAllPhotons,StdDiElectronFromTracks],\n decDescriptors=self.D0EEG\n ) \n\n self.selD0EE = makeEEResonance(\n d0ee_name, \n config,\n inputSel=[StdAllLooseElectrons],\n decDescriptors=self.D0EE\n )\n \n self.selDst2PiD0EEG = makeDstar(\n dst2pid0eeg_name,\n config,\n inputSel=[self.selD0EEG, StdAllNoPIDsPions],\n decDescriptors=self.Dst2D0Pi,\n )\n\n self.selDst2PiD0EE = makeDstar(\n dst2pid0ee_name,\n config,\n inputSel=[self.selD0EE, StdAllNoPIDsPions],\n decDescriptors=self.Dst2D0Pi,\n )\n\n self.line_Dst2PiD0EEG = make_line(\n self,\n '{0}Line'.format(dst2pid0eeg_name),\n prescale=config['PrescaleDst2PiD0EEG'],\n postscale=config['PostscaleDst2PiD0EEG'],\n selection=self.selDst2PiD0EEG,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )\n\n self.line_Dst2PiD0EE = make_line(\n self,\n '{0}Line'.format(dst2pid0ee_name),\n prescale=config['PrescaleDst2PiD0EE'],\n postscale=config['PostscaleDst2PiD0EE'],\n selection=self.selDst2PiD0EE,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )", "def compInit(self,compInfo, node, modelInfo, subcktName,dir_name,transInfo):\n print \"CompInfo inside compInit function : compInit------->\",compInfo\n #### initial processing to check if MOs is present. 
If so, library to be used is BondLib\n modelicaCompInit = []\n numNodesSub = {} \n mosInfo = {}\n IfMOS = '0'\n for eachline in compInfo:\n #words = eachline.split()\n if eachline[0] == 'm':\n IfMOS = '1'\n break\n if len(subcktName) > 0:\n subOptionInfo = []\n subSchemInfo = []\n for eachsub in subcktName:\n filename_tem = eachsub + '.sub'\n filename_tem = os.path.join(dir_name, filename_tem)\n data = self.readNetlist(filename_tem)\n subOptionInfo, subSchemInfo = self.separateNetlistInfo(data)\n \n for eachline in subSchemInfo:\n #words = eachline.split()\n if eachline[0] == 'm':\n IfMOS = '1'\n break\n \n #Lets Start with Source details\n for eachline in self.sourceDetail:\n eachline = eachline.lower()\n words = eachline.split()\n typ = words[3].split('(')\n if typ[0] == \"pulse\":\n per = words[9].split(')')\n stat = self.mappingData[\"Sources\"][typ[0]]+' '+words[0]+'(rising = '+words[6]+', V = '+words[4]\\\n +', width = '+words[8]+', period = '+per[0]+', offset = '+typ[1]+', startTime = '+words[5]+', falling = '+words[7]+');' \n modelicaCompInit.append(stat)\n if typ[0] == \"sine\":\n theta = words[7].split(')')\n stat = self.mappingData[\"Sources\"][typ[0]]+' '+words[0]+'(offset = '+typ[1]+', V = '+words[4]+', freqHz = '+words[5]+', startTime = '+words[6]+', phase = '+theta[0]+');'\n modelicaCompInit.append(stat)\n if typ[0] == \"pwl\":\n keyw = self.mappingData[\"Sources\"][typ[0]]+' '\n stat = keyw + words[0] + '(table = [' + typ[1] + ',' + words[4] + ';'\n length = len(words);\n for i in range(6,length,2):\n if i == length-2:\n w = words[i].split(')')\n stat = stat + words[i-1] + ',' + w[0] \n else:\n stat = stat + words[i-1] + ',' + words[i] + ';'\n stat = stat + ']);'\n modelicaCompInit.append(stat) \n if typ[0] == words[3] and typ[0] != \"dc\":\n #It is DC constant but no dc keyword\n val_temp = typ[0].split('v')\n stat = self.mappingData[\"Sources\"][\"dc\"]+' ' + words[0] + '(V = ' + val_temp[0] + ');' \n modelicaCompInit.append(stat)\n elif typ[0] == words[3] and typ[0] == \"dc\":\n stat = self.mappingData[\"Sources\"][typ[0]]+' ' + words[0] + '(V = ' + words[4] + ');' ### check this\n modelicaCompInit.append(stat)\n \n #Lets start for device\n for eachline in self.deviceDetail:\n words=eachline.split()\n if eachline[0]=='d' or eachline[0]=='D':\n if len(words)>3:\n if modelInfo[words[3]].has_key('n'):\n n = float(modelInfo[words[3]]['n'])\n else:\n n = 1.0\n vt = str(float(0.025*n))\n stat = self.mappingData[\"Devices\"][eachline[0]]+' '+ words[0] + '(Ids = ' + modelInfo[words[3]]['is'] + ', Vt = ' + vt + ', R = 1e12' +');'\n else:\n stat = self.mappingData[\"Devices\"][eachline[0]]+' '+ words[0] +';'\n modelicaCompInit.append(stat)\n \n elif eachline[0]=='q' or eachline[0]=='Q':\n if words[4]=='npn':\n start = 'Analog.Semiconductors.NPN '\n elif words[4]=='pnp':\n start = 'Analog.Semiconductors.PNP '\n \n inv_vak = float(self.tryExists(modelInfo,words,4, 'vaf', 50))\n vak_temp = 1/inv_vak\n vak = str(vak_temp)\n bf = self.tryExists(modelInfo,words,4, 'bf', 50)\n br = self.tryExists(modelInfo,words,4, 'br', 0.1)\n Is = self.tryExists(modelInfo,words,4, 'is', 1e-16)\n tf = self.tryExists(modelInfo,words,4, 'tf', 1.2e-10)\n tr = self.tryExists(modelInfo,words,4, 'tr', 5e-9)\n cjs = self.tryExists(modelInfo,words,4, 'cjs', 1e-12)\n cje = self.tryExists(modelInfo,words,4, 'cje', 4e-13)\n cjc = self.tryExists(modelInfo,words,4, 'cjc', 5e-13)\n vje = self.tryExists(modelInfo,words,4, 'vje', 0.8)\n mje = self.tryExists(modelInfo,words,4, 'mje', 0.4)\n vjc = 
self.tryExists(modelInfo,words,4, 'vjc', 0.8)\n mjc = self.tryExists(modelInfo,words,4, 'mjc', 0.333)\n stat = start + words[0] +'(Bf = ' + bf + ', Br = ' + br + ', Is = ' +Is+ ', Vak = ' + vak + ', Tauf = ' +tf+ ', Taur = ' +tr+ ', Ccs = ' +cjs+ ', Cje = ' +cje+ ', Cjc = ' +cjc+ ', Phie = ' + vje + ', Me = ' + mje + ', Phic = ' + vjc + ', Mc = ' + mjc + ');'\n modelicaCompInit.append(stat)\n \n elif eachline[0]=='m' or eachline[0]==\"M\":\n print \"Starting Mosfet\"\n eachline = eachline.split(words[5])\n eachline = eachline[1]\n eachline = eachline.strip()\n eachline = eachline.replace(' = ', '=').replace('= ','=').replace(' =','=').replace(' * ', '*').replace(' + ', '+').replace(' { ', '').replace(' } ', '')\n eachline = eachline.split()\n mosInfo[words[0]] = {}\n for each in eachline:\n if len(each) > 1:\n each = each.split('=')\n mosInfo[words[0]][each[0]] = each[1]\n trans = transInfo[words[5]]\n if trans == 'nmos':\n start = 'BondLib.Electrical.Analog.Spice.Mn '\n else:\n start = 'BondLib.Electrical.Analog.Spice.Mp '\n vto = self.tryExists(modelInfo,words,5,'vto',0)\n gam = self.tryExists(modelInfo,words,5,'gamma',0)\n phi = self.tryExists(modelInfo,words,5, 'phi', 0)\n ld = self.tryExists(modelInfo,words,5,'ld',0)\n uo = self.tryExists(modelInfo,words,5,'uo',0)\n lam = self.tryExists(modelInfo,words,5,'lambda',0)\n tox = self.tryExists(modelInfo,words,5,'tox',3e-9)\n pb = self.tryExists(modelInfo,words,5, 'pb',0.8)\n cj = self.tryExists(modelInfo,words,5, 'cj',0)\n cjsw = self.tryExists(modelInfo,words,5, 'cjsw',1e-9)\n mj = self.tryExists(modelInfo,words,5, 'mj',0.33)\n mjsw = self.tryExists(modelInfo,words,5, 'mjsw',0.33)\n cgdo = self.tryExists(modelInfo,words,5, 'cgdo',0)\n js = self.tryExists(modelInfo,words,5, 'js',0)\n cgbo = self.tryExists(modelInfo,words,5, 'cgbo',0)\n cgso = self.tryExists(modelInfo,words,5,'cgso',0)\n try:\n l = mosInfo[words[0]]['l']\n except KeyError:\n l = '1e-6'\n try:\n w = mosInfo[words[0]]['w']\n except KeyError:\n w = '100e-6'\n try:\n As = mosInfo[words[0]]['as']\n ad = mosInfo[words[0]]['ad']\n except KeyError:\n As = '0'\n ad = '0'\n try:\n ps = mosInfo[words[0]]['ps']\n pd = mosInfo[words[0]]['pd']\n except KeyError:\n ps = '0'\n pd = '0'\n stat = start + words[0] + '(Tnom = 300, VT0 = ' + vto + ', GAMMA = ' + gam + ', PHI = ' + phi + ', LD = ' +ld+ ', U0 = ' + str(float(uo)*0.0001) + ', LAMBDA = ' + lam + ', TOX = ' +tox+ ', PB = ' + pb + ', CJ = ' +cj+ ', CJSW = ' +cjsw+ ', MJ = ' + mj + ', MJSW = ' + mjsw + ', CGD0 = ' +cgdo+ ', JS = ' +js+ ', CGB0 = ' +cgbo+ ', CGS0 = ' +cgso+ ', L = ' +l+ ', W = ' + w + ', Level = 1' + ', AD = ' + ad + ', AS = ' + As + ', PD = ' + pd + ', PS = ' + ps + ');'\n stat = stat.translate(maketrans('{}', ' '))\n modelicaCompInit.append(stat)\n \n #Lets start for Subcircuit\n for eachline in self.subCktDetail:\n print \"each Line-------->\",eachline\n global point\n global subname\n temp_line = eachline.split()\n temp = temp_line[0].split('x')\n index = temp[1]\n for i in range(0,len(temp_line),1):\n if temp_line[i] in subcktName:\n subname = temp_line[i]\n numNodesSub[subname] = i - 1\n point = i\n if len(temp_line) > point + 1:\n rem = temp_line[point+1:len(temp_line)]\n rem_new = ','.join(rem)\n stat = subname + ' ' + subname +'_instance' + index + '(' + rem_new + ');'\n else:\n stat = subname + ' ' + subname +'_instance' + index + ';'\n modelicaCompInit.append(stat)\n \n \n for eachline in compInfo:\n words = eachline.split()\n #val = words[3]\n #value = self.splitIntoVal(val)\n value = 
self.getUnitVal(words[-1])\n if eachline[0] == 'r':\n stat = 'Analog.Basic.Resistor ' + words[0] + '(R = ' + value + ');'\n modelicaCompInit.append(stat)\n elif eachline[0] == 'c':\n stat = 'Analog.Basic.Capacitor ' + words[0] + '(C = ' + value + ');'\n modelicaCompInit.append(stat)\n elif eachline[0] == 'l':\n stat = 'Analog.Basic.Inductor ' + words[0] + '(L = ' + value + ');'\n modelicaCompInit.append(stat) \n elif eachline[0] == 'e':\n stat = 'Analog.Basic.VCV ' + words[0] + '(gain = ' + self.getUnitVal(words[5]) + ');'\n modelicaCompInit.append(stat) \n elif eachline[0] == 'g':\n stat = 'Analog.Basic.VCC ' + words[0] + '(transConductance = ' + self.getUnitVal(words[5]) + ');'\n modelicaCompInit.append(stat) \n elif eachline[0] == 'f':\n stat = 'Analog.Basic.CCC ' + words[0] + '(gain = ' + self.getUnitVal(words[4]) + ');'\n modelicaCompInit.append(stat) \n elif eachline[0] == 'h':\n stat = 'Analog.Basic.CCV ' + words[0] + '(transResistance = ' + self.getUnitVal(words[4]) + ');'\n modelicaCompInit.append(stat)\n \n else:\n continue\n \n if '0' or 'gnd' in node:\n modelicaCompInit.append('Analog.Basic.Ground g;')\n return modelicaCompInit, numNodesSub", "def __init__(self):\n super().__init__(sys.argv)\n self.s1 = serverControl()\n self.c1 = clientControl(\"Markus\")\n self.c2 = clientControl(\"Hannes\")", "def __init__(self, name, config):\n self.name = name\n self.config = config\n LineBuilder.__init__(self, name, config)\n \n pi0eeg_name = '{0}Pi0EEG'.format(name)\n \n d2pipi0eeg_name = '{0}D2PiPi0EEG'.format(name) \n d2kpi0eeg_name = '{0}D2KPi0EEG'.format(name)\n\n d02pi0pi0eeg_name = '{0}D02Pi0Pi0EEG'.format(name) \n dst2d0pieeg_name = '{0}Dst2D0PiEEG'.format(name)\n\n self.selPi0EEG = makeEEGResonance(\n pi0eeg_name, \n config,\n inputSel=[StdLooseAllPhotons,StdDiElectronFromTracks],\n decDescriptors=self.Pi0EEG\n )\n \n self.selD2PiPi0EEG = makeD(\n d2pipi0eeg_name,\n config,\n inputSel=[self.selPi0EEG, StdAllNoPIDsPions],\n decDescriptors=self.D2PiPi0,\n useBachelorPID=False\n )\n\n usePIDforKaon = False\n if (config['Bachelor_PIDK_MIN'] > -10):\n usePIDforKaon = True\n else :\n usePIDforKaon = False\n \n self.selD2KPi0EEG = makeD(\n d2kpi0eeg_name, \n config,\n inputSel=[self.selPi0EEG, StdAllNoPIDsKaons],\n decDescriptors=self.D2KPi0,\n useBachelorPID=usePIDforKaon\n )\n\n self.selD02Pi0Pi0EEG = makeD(\n d02pi0pi0eeg_name, \n config,\n inputSel=[self.selPi0EEG],\n decDescriptors=self.D02Pi0Pi0,\n useBachelorPID=False,\n )\n\n self.selDst2D0PiEEG = makeDstar(\n dst2d0pieeg_name,\n config,\n inputSel=[self.selD02Pi0Pi0EEG, StdAllNoPIDsPions],\n decDescriptors=self.Dst2D0Pi,\n )\n\n self.line_D2PiPi0EEG = make_line(\n self,\n '{0}Line'.format(d2pipi0eeg_name),\n prescale=config['PrescaleD2PiPi0EEG'],\n postscale=config['PostscaleD2PiPi0EEG'],\n selection=self.selD2PiPi0EEG,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )\n\n self.line_D2KPi0EEG = make_line(\n self,\n '{0}Line'.format(d2kpi0eeg_name),\n prescale=config['PrescaleD2KPi0EEG'],\n postscale=config['PostscaleD2KPi0EEG'],\n selection=self.selD2KPi0EEG,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )\n \n self.line_Dst2D0PiEEG = make_line(\n self,\n '{0}Line'.format(dst2d0pieeg_name),\n prescale=config['PrescaleDst2D0PiEEG'],\n postscale=config['PostscaleDst2D0PiEEG'],\n selection=self.selDst2D0PiEEG,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )", "def __init__(self, c, selmgr, num_circuits, RouterClass):\r\n PathBuilder.__init__(self, c, selmgr, RouterClass)\r\n # Set handler to 
the connection here to \r\n # not miss any circuit events on startup\r\n c.set_event_handler(self)\r\n self.num_circuits = num_circuits # Size of the circuit pool\r\n self.check_circuit_pool() # Bring up the pool of circs\r", "def process_tree(tree):\n c = circuit()\n l = line()\n names = {}\n procedures = []\n for lst in tree.children:\n print(lst)\n if type(lst[0]) is str:\n names[lst[0]] = lst[1]\n else:\n procedures.append(lst)\n print(names)\n #print(procedures)\n\n for proc in procedures:\n\n proc_elements_names = proc[0]\n proc_name = proc[1]\n\n #print(proc_elements_names)\n #print(proc_name)\n\n if proc_name == \"set_mode\":\n mode_name = proc_elements_names[0]\n if mode_name != \"draw-mode\": \n c.set_mode(mode_name)\n elif mode_name == \"draw-mode\":\n l1 = line()\n # draw mode is different from other modes\n for element in names:\n e = CompleteElement(element)\n e.set_other_attrs(names[element])\n e.process_other_attrs()\n l1.addElement(e)\n c.connectInSeries(l1)\n c.set_mode(\"draw-mode\")\n \n \n if proc_name == \"series\":\n l1 = line()\n for element in proc_elements_names:\n l1.addElement(names[element])\n l = l1\n c.connectInSeries(l)\n #raise SyntaxError(\"Alias {0} referrenced before assignment\".format(item[0]))\n\n elif proc_name == \"parallel\":\n l1 = line()\n for element in proc_elements_names:\n l1.addElement(names[element])\n c.connectInParallel(l1)\n l1 = line()\n\n\n elif proc_name == \"add_parallel\":\n new_element = proc_elements_names[1]\n old_element = proc_elements_names[0]\n l1 = line()\n l1.addElement(names[new_element])\n c.connection.append(l1)\n\n\n elif proc_name == \"add_series\":\n new_element = proc_elements_names[1]\n old_element = proc_elements_names[0]\n for ln in c.connection:\n for e in ln.elements:\n if names[old_element] == e:\n ln.addElement(names[new_element])\n\n\n c.evaluate(\"output.png\")\n #print(c)", "def main(args):\n if len(sys.argv) == 2:\n filename = sys.argv[1]\n else:\n print \"USAGE:\"\n print \"python NgspicetoModelica.py <filename>\"\n sys.exit()\n \n dir_name = os.path.dirname(os.path.realpath(filename))\n file_basename = os.path.basename(filename)\n \n obj_NgMoConverter = NgMoConverter()\n \n #Getting all the require information\n lines = obj_NgMoConverter.readNetlist(filename)\n #print \"Complete Lines of Ngspice netlist :lines ---------------->\",lines\n optionInfo, schematicInfo = obj_NgMoConverter.separateNetlistInfo(lines)\n #print \"All option details like analysis,subckt,.ic,.model : OptionInfo------------------->\",optionInfo\n #print \"Schematic connection info :schematicInfo\",schematicInfo\n modelName, modelInfo, subcktName, paramInfo,transInfo = obj_NgMoConverter.addModel(optionInfo)\n print \"Name of Model : modelName-------------------->\",modelName\n print \"Model Information :modelInfo--------------------->\",modelInfo\n #print \"Subcircuit Name :subcktName------------------------>\",subcktName\n #print \"Parameter Information :paramInfo---------------------->\",paramInfo\n \n \n modelicaParamInit = obj_NgMoConverter.processParam(paramInfo)\n #print \"Make modelicaParamInit from paramInfo :processParamInit------------->\",modelicaParamInit \n compInfo, plotInfo = obj_NgMoConverter.separatePlot(schematicInfo)\n print \"Info like run etc : CompInfo----------------->\",compInfo\n #print \"Plot info like plot,print etc :plotInfo\",plotInfo\n IfMOS = '0'\n \n for eachline in compInfo:\n words = eachline.split()\n if eachline[0] == 'm':\n IfMOS = '1'\n break\n subOptionInfo = []\n subSchemInfo = []\n if 
len(subcktName) > 0:\n #subOptionInfo = []\n #subSchemInfo = []\n for eachsub in subcktName:\n filename_temp = eachsub + '.sub'\n data = obj_NgMoConverter.readNetlist(filename_temp)\n subOptionInfo, subSchemInfo = obj_NgMoConverter.separateNetlistInfo(data)\n for eachline in subSchemInfo:\n words = eachline.split()\n if eachline[0] == 'm':\n IfMOS = '1'\n break\n #print \"Subcircuit OptionInfo : subOptionInfo------------------->\",subOptionInfo\n #print \"Subcircuit Schematic Info :subSchemInfo-------------------->\",subSchemInfo\n \n node, nodeDic, pinInit, pinProtectedInit = obj_NgMoConverter.nodeSeparate(compInfo, '0', [], subcktName,[])\n print \"All nodes in the netlist :node---------------->\",node\n print \"NodeDic which will be used for modelica : nodeDic------------->\",nodeDic\n #print \"PinInit-------------->\",pinInit\n #print \"pinProtectedInit----------->\",pinProtectedInit\n \n modelicaCompInit, numNodesSub = obj_NgMoConverter.compInit(compInfo,node, modelInfo, subcktName,dir_name,transInfo)\n print \"ModelicaComponents : modelicaCompInit----------->\",modelicaCompInit\n print \"SubcktNumNodes : numNodesSub---------------->\",numNodesSub\n \n connInfo = obj_NgMoConverter.connectInfo(compInfo, node, nodeDic, numNodesSub,subcktName)\n \n #print \"ConnInfo------------------>\",connInfo\n \n \n ###After Sub Ckt Func\n if len(subcktName) > 0:\n data, subOptionInfo, subSchemInfo, subModel, subModelInfo, subsubName,subParamInfo, modelicaSubCompInit, modelicaSubParam,\\\n nodeSubInterface,nodeSub, nodeDicSub, pinInitSub, connSubInfo = obj_NgMoConverter.procesSubckt(subcktName,numNodesSub,dir_name) #Adding 'numNodesSub' by Fahim\n \n #Creating Final Output file\n newfile = filename.split('.')\n newfilename = newfile[0]\n outfile = newfilename + \".mo\"\n out = open(outfile,\"w\")\n out.writelines('model ' + os.path.basename(newfilename))\n out.writelines('\\n')\n if IfMOS == '0':\n out.writelines('import Modelica.Electrical.*;')\n elif IfMOS == '1':\n out.writelines('import BondLib.Electrical.*;')\n #out.writelines('import Modelica.Electrical.*;')\n out.writelines('\\n')\n \n for eachline in modelicaParamInit:\n if len(paramInfo) == 0:\n continue\n else:\n out.writelines(eachline)\n out.writelines('\\n')\n for eachline in modelicaCompInit:\n if len(compInfo) == 0:\n continue\n else:\n out.writelines(eachline)\n out.writelines('\\n')\n \n out.writelines('protected')\n out.writelines('\\n')\n out.writelines(pinInit)\n out.writelines('\\n')\n out.writelines('equation')\n out.writelines('\\n')\n \n for eachline in connInfo:\n if len(connInfo) == 0:\n continue\n else:\n out.writelines(eachline)\n out.writelines('\\n')\n \n out.writelines('end '+ os.path.basename(newfilename) + ';')\n out.writelines('\\n')\n\n\n out.close()", "def __init__(self, name=None, start_settings=None,\n parallel_settings=None, electronic_settings=None, magnetic_settings=None,\n ionic_settings=None, hubbard_settings=None, hybrid_settings=None, misc_settings=None):\n\n self._name = name or \"input_param\"\n self._start_settings = start_settings or {\"NWRITE\": 2, \"ISTART\": 1, \"INIWAV\": 1,\n \"ICHARG\": None, \"NELECT\": None, \"LORBIT\": 11,\n \"NEDOS\": 1000, \"LOPTICS\": \".FALSE.\",\"ISYM\": -1 , \"LELF\": None, \"LVHAR\": None, \"RWIGS\": None, \"LVTOF\": None, \"NBANDS\": None, \"LWAVE\": None}\n self._parallel_settings = parallel_settings or {\"flnm\": \"run_scf.sh\", \"job_name\": \"scf_std\", \"machine\": \"nano\" ,\n \"partition\": \"etna\", \"nodes\": 4,\"ppn\": 24,\n \"max_time\": \"24:00:00\", 
\"NCORE\": 8, \"KPAR\": 2, \"exec\": \"vasp_std\"}\n self._electronic_settings = electronic_settings or {\"PREC\":\"Accurate\" , \"ALGO\": \"Normal\", \"ENCUT\": 800,\n \"NELM\": None, \"NELMIN\": None, \"GGA\": \"PS\" ,\"EDIFF\": 10E-05, \"ISMEAR\": 1,\n \"SIGMA\": 0.2, \"LASPH\": \".TRUE.\", \"LREAL\": \"Auto\", \"ADDGRID\": \".TRUE.\", \"MAXMIX\": 100, \"BMIX\": 1.5}\n self._ionic_settings = ionic_settings\n self._magnetic_settings = magnetic_settings\n self._hybrid_settings = hybrid_settings\n self._hubbard_settings = hubbard_settings\n self._misc_settings = misc_settings", "def __init__(self, name, config):\n self.name = name\n self.config = config\n LineBuilder.__init__(self, name, config)\n \n phi3hm_name = '{0}Phi3HM'.format(name)\n phi3hr_name = '{0}Phi3HR'.format(name)\n \n d2piphi3hm_name = '{0}D2PiPhi3HM'.format(name)\n d2piphi3hr_name = '{0}D2PiPhi3HR'.format(name)\n \n d2kphi3hm_name = '{0}D2KPhi3HM'.format(name)\n d2kphi3hr_name = '{0}D2KPhi3HR'.format(name)\n \n\n self.selPhi3HM = makeResonance(\n phi3hm_name,\n config,\n inputSel=[StdAllNoPIDsPions,StdLooseMergedPi0],\n decDescriptors=self.Phi3H\n )\n\n self.selPhi3HR = makeResonance(\n phi3hr_name,\n config,\n inputSel=[StdAllNoPIDsPions,StdLooseResolvedPi0],\n decDescriptors=self.Phi3H\n )\n \n self.selD2PiPhi3HM = makeD(\n d2piphi3hm_name,\n config,\n inputSel=[self.selPhi3HM, StdAllNoPIDsPions],\n decDescriptors=self.D2PiPhi,\n useBachelorPID=False,\n )\n \n self.selD2KPhi3HM = makeD(\n d2kphi3hm_name,\n config,\n inputSel=[self.selPhi3HM, StdAllNoPIDsKaons],\n decDescriptors=self.D2KPhi,\n useBachelorPID=False,\n )\n \n self.selD2PiPhi3HR = makeD(\n d2piphi3hr_name,\n config,\n inputSel=[self.selPhi3HR, StdAllNoPIDsPions],\n decDescriptors=self.D2PiPhi,\n useBachelorPID=False,\n )\n \n self.selD2KPhi3HR = makeD(\n d2kphi3hr_name,\n config,\n inputSel=[self.selPhi3HR, StdAllNoPIDsKaons],\n decDescriptors=self.D2KPhi,\n useBachelorPID=False,\n )\n \n self.line_D2PiPhi3HM = make_line(\n self,\n name='{0}Line'.format(d2piphi3hm_name),\n prescale=config['PrescaleD2PiPhi3HM'],\n postscale=config['PostscaleD2PiPhi3HM'],\n selection=self.selD2PiPhi3HM,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )\n \n self.line_D2PiPhi3HR = make_line(\n self,\n name='{0}Line'.format(d2piphi3hr_name),\n prescale=config['PrescaleD2PiPhi3HR'],\n postscale=config['PostscaleD2PiPhi3HR'],\n selection=self.selD2PiPhi3HR,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )\n self.line_D2KPhi3HM = make_line(\n self,\n name='{0}Line'.format(d2kphi3hm_name),\n prescale=config['PrescaleD2KPhi3HM'],\n postscale=config['PostscaleD2KPhi3HM'],\n selection=self.selD2KPhi3HM,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )\n \n self.line_D2KPhi3HR = make_line(\n self,\n name='{0}Line'.format(d2kphi3hr_name),\n prescale=config['PrescaleD2KPhi3HR'],\n postscale=config['PostscaleD2KPhi3HR'],\n selection=self.selD2KPhi3HR,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )", "def __str__(self):\n return self.components + \", \" + self.circuit_name", "def buildCircuit(circuit, frequencies, *parameters, eval_string='', index=0):\n\n parameters = np.array(parameters).tolist()\n frequencies = np.array(frequencies).tolist()\n circuit = circuit.replace(' ', '')\n\n def parse_circuit(circuit, parallel=False, series=False):\n \"\"\" Splits a circuit string by either dashes (series) or commas\n (parallel) outside of any paranthesis. 
Removes any leading 'p('\n or trailing ')' when in parallel mode \"\"\"\n\n assert parallel != series, \\\n 'Exactly one of parallel or series must be True'\n\n def count_parens(string):\n return string.count('('), string.count(')')\n\n if parallel:\n special = ','\n if circuit.endswith(')') and circuit.startswith('p('):\n circuit = circuit[2:-1]\n if series:\n special = '-'\n\n split = circuit.split(special)\n result = []\n skipped = []\n for i, sub_str in enumerate(split):\n if i not in skipped:\n if '(' not in sub_str and ')' not in sub_str:\n result.append(sub_str)\n else:\n open_parens, closed_parens = count_parens(sub_str)\n if open_parens == closed_parens:\n result.append(sub_str)\n else:\n uneven = True\n while i < len(split) - 1 and uneven:\n sub_str += special + split[i+1]\n\n open_parens, closed_parens = count_parens(sub_str)\n uneven = open_parens != closed_parens\n\n i += 1\n skipped.append(i)\n result.append(sub_str)\n return result\n\n parallel = parse_circuit(circuit, parallel=True)\n series = parse_circuit(circuit, series=True)\n\n if parallel is not None and len(parallel) > 1:\n eval_string += \"p([\"\n split = parallel\n elif series is not None and len(series) > 1:\n eval_string += \"s([\"\n split = series\n\n for i, elem in enumerate(split):\n if ',' in elem or '-' in elem:\n eval_string, index = buildCircuit(elem, frequencies,\n *parameters,\n eval_string=eval_string,\n index=index)\n else:\n param_string = \"\"\n elem_number = len(elem.split(\"/\"))\n\n param_string += str(parameters[index:index + elem_number])\n new = elem[0] + '(' + param_string + ',' + str(frequencies) + ')'\n eval_string += new\n\n index += elem_number\n\n if i == len(split) - 1:\n eval_string += '])'\n else:\n eval_string += ','\n\n return eval_string, index", "def read_initial_parameters(inputfilename):\r\n subc_params = []\r\n subarea_params = []\r\n global subc_names\r\n subc_names = []\r\n subcatchment_parameters = []\r\n inputfile = open(inputfilename, 'r')\r\n for line in inputfile:\r\n if(line.find(\"[SUBCATCHMENTS]\") != -1):\r\n line = inputfile.readline()\r\n for i in range(count):\r\n templine = list(line)\r\n if templine[0] == \";\" or templine[0] == \" \" or len(templine) < 10:\r\n line = inputfile.readline()\r\n continue\r\n\r\n elif (line.find(\"[\") != -1):\r\n break\r\n else:\r\n linesplit = line.split()\r\n subc_params.append(linesplit[4:7])\r\n subc_names.append(linesplit[0])\r\n line = inputfile.readline()\r\n if (line.find(\"[SUBAREAS]\") != -1):\r\n line = inputfile.readline()\r\n for i in range(count):\r\n templine = list(line)\r\n if templine[0] == \";\" or templine[0] == \" \" or len(templine) < 10:\r\n line = inputfile.readline()\r\n continue\r\n elif (line.find(\"[\") != -1):\r\n break\r\n else:\r\n linesplit = line.split()\r\n subarea_params.append(linesplit[1:6])\r\n line = inputfile.readline()\r\n inputfile.close()\r\n\r\n #Part of the function that experiments with np array. Potentially removes the need for the list transformation\r\n # functions that chew up a lot of time. 
Each subcatchment has a row, each parameter type has a column.\r\n global subcatchment_parameters_np\r\n subcatchment_parameters_np = np.empty((len(subc_params[0]) + len(subarea_params[0]), len(subc_params)), dtype=float)\r\n for row in range(len(subc_params)):\r\n for col in range(len(subc_params[0])):\r\n subcatchment_parameters_np[row, col] = float(subc_params[row][col])\r\n for row in range(len(subarea_params)):\r\n for col in range(len(subarea_params[0])):\r\n subcatchment_parameters_np[row, col + len(subc_params[0])] = float(subarea_params[row][col])\r\n\r\n #Old string code\r\n # for i in range(len(subc_params)):\r\n # for j in range(len(subarea_params[i])):\r\n # subc_params[i].append(subarea_params[i][j])\r\n # subcatchment_parameters.append(subc_params[i])\r\n return(np_subcatchment_parameters)", "def __init__(self, name, config):\n self.name = name\n self.config = config\n LineBuilder.__init__(self, name, config)\n \n etappg_name = '{0}EtaPPG'.format(name)\n eta3hr_name = '{0}Eta3HR'.format(name)\n eta3hm_name = '{0}Eta3HM'.format(name)\n \n d2pietappg_name = '{0}D2PiEtaPPG'.format(name)\n d2pieta3hr_name = '{0}D2PiEta3HR'.format(name)\n d2pieta3hm_name = '{0}D2PiEta3HM'.format(name)\n \n d2ketappg_name = '{0}D2KEtaPPG'.format(name)\n d2keta3hr_name = '{0}D2KEta3HR'.format(name)\n d2keta3hm_name = '{0}D2KEta3HM'.format(name)\n\n self.selEtaPPG = makeResonance(\n etappg_name,\n config,\n inputSel=[StdAllNoPIDsPions,StdLooseAllPhotons],\n decDescriptors=self.EtaPPG\n )\n\n self.selEta3HR = makeResonance(\n eta3hr_name,\n config,\n inputSel=[StdAllNoPIDsPions,StdLooseResolvedPi0],\n decDescriptors=self.Eta3H\n )\n\n self.selEta3HM = makeResonance(\n eta3hm_name,\n config,\n inputSel=[StdAllNoPIDsPions,StdLooseMergedPi0],\n decDescriptors=self.Eta3H\n )\n\n self.selD2PiEtaPPG = makeD(\n d2pietappg_name,\n config,\n inputSel=[self.selEtaPPG, StdAllNoPIDsPions],\n decDescriptors=self.D2PiEta,\n useBachelorPID=False,\n )\n\n self.selD2KEtaPPG = makeD(\n d2ketappg_name,\n config,\n inputSel=[self.selEtaPPG, StdAllNoPIDsKaons],\n decDescriptors=self.D2KEta,\n useBachelorPID=False,\n )\n \n self.selD2PiEta3HR = makeD(\n d2pieta3hr_name,\n config,\n inputSel=[self.selEta3HR, StdAllNoPIDsPions],\n decDescriptors=self.D2PiEta,\n useBachelorPID=False,\n )\n\n self.selD2KEta3HR = makeD(\n d2keta3hr_name,\n config,\n inputSel=[self.selEta3HR, StdAllNoPIDsKaons],\n decDescriptors=self.D2KEta,\n useBachelorPID=False,\n )\n \n self.selD2PiEta3HM = makeD(\n d2pieta3hm_name,\n config,\n inputSel=[self.selEta3HM, StdAllNoPIDsPions],\n decDescriptors=self.D2PiEta,\n useBachelorPID=False,\n )\n \n\n self.selD2KEta3HM = makeD(\n d2keta3hm_name,\n config,\n inputSel=[self.selEta3HM, StdAllNoPIDsKaons],\n decDescriptors=self.D2KEta,\n useBachelorPID=False,\n )\n \n\n self.line_D2PiEtaPPG = make_line(\n self,\n name='{0}Line'.format(d2pietappg_name),\n prescale=config['PrescaleD2PiEtaPPG'],\n postscale=config['PostscaleD2PiEtaPPG'],\n selection=self.selD2PiEtaPPG,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )\n\n self.line_D2PiEta3HM = make_line(\n self,\n name='{0}Line'.format(d2pieta3hm_name),\n prescale=config['PrescaleD2PiEta3HM'],\n postscale=config['PostscaleD2PiEta3HM'],\n selection=self.selD2PiEta3HM,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )\n\n self.line_D2PiEta3HR = make_line(\n self,\n name='{0}Line'.format(d2pieta3hr_name),\n prescale=config['PrescaleD2PiEta3HR'],\n postscale=config['PostscaleD2PiEta3HR'],\n selection=self.selD2PiEta3HR,\n 
HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )\n\n self.line_D2KEtaPPG = make_line(\n self,\n name='{0}Line'.format(d2ketappg_name),\n prescale=config['PrescaleD2KEtaPPG'],\n postscale=config['PostscaleD2KEtaPPG'],\n selection=self.selD2KEtaPPG,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )\n \n self.line_D2KEta3HM = make_line(\n self,\n name='{0}Line'.format(d2keta3hm_name),\n prescale=config['PrescaleD2KEta3HM'],\n postscale=config['PostscaleD2KEta3HM'],\n selection=self.selD2KEta3HM,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )\n \n self.line_D2KEta3HR = make_line(\n self,\n name='{0}Line'.format(d2keta3hr_name),\n prescale=config['PrescaleD2KEta3HR'],\n postscale=config['PostscaleD2KEta3HR'],\n selection=self.selD2KEta3HR,\n HLT1=config['Hlt1Filter'],\n HLT2=config['Hlt2Filter']\n )", "def testcliparams(c, name=\"def\"):\n print(name)", "def construct_circuit(self):\n return self._circuit", "def parse_control(self,ins): \n if ins.instr == 'j':\n if len(ins.args) == 1:\n if type(ins.args[0]) == Register:\n self.need = [ins.args[0]]\n if ins.args[0].expr == \"$31\":\n self.need += [Register(\"$2\"), Register(\"$3\"),Register(\"$16\"),Register(\"$17\"),Register(\"$18\"),Register(\"$19\"),Register(\"$20\"),Register(\"$21\"),Register(\"$22\"),Register(\"$23\"),Register(\"$f0\"),Register(\"$f1\"),Register(\"$f2\"),Register(\"$f3\"),Register(\"$fp\"),Register(\"$sp\"),Register(\"$f20\"),Register(\"$f22\"),Register(\"$f24\"),Register(\"$f26\"),Register(\"$f28\"),Register(\"$f30\")]\n else:\n self.label = [ins.args[0]]\n \n \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'jal':\n self.gen = [Register(\"$31\"),Register(\"$2\"),Register(\"$3\"),Register(\"$f0\")] #Return address and values\n #Reg $4,5,6,7 are registers that are used for fuction arguments.\n # $f12, $f13, $f14, $f15 parameter registers zijn voor functies. \n # http://msdn.microsoft.com/en-us/library/ms253512%28v=vs.90%29.aspx \n #it is not clear which one will be used.\n self.need = [Register(\"$4\"),Register(\"$5\"),Register(\"$6\"),Register(\"$7\"),Register(\"$fp\"), Register(\"$sp\"),Register(\"$f12\"),Register(\"$f13\"),Register(\"$f14\"),Register(\"$f15\")]\n \n elif ins.instr == 'jr':\n if len(ins.args) == 1:\n self.need = [ins.args[0]]\n if ins.args[0].expr == \"$31\":\n self.need += [Register(\"$2\"), Register(\"$3\"),Register(\"$16\"),Register(\"$17\"),Register(\"$18\"),Register(\"$19\"),Register(\"$20\"),Register(\"$21\"),Register(\"$22\"),Register(\"$23\"),Register(\"$f0\"),Register(\"$f1\"),Register(\"$f2\"),Register(\"$f3\"),Register(\"$fp\"),Register(\"$sp\"),Register(\"$f20\"),Register(\"$f22\"),Register(\"$f24\"),Register(\"$f26\"),Register(\"$f28\"),Register(\"$f30\")] \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'jalr':\n if len(ins.args) == 1:\n #Reg $4,5,6,7 are registers that are used for fuction arguments.\n # $f12, $f13, $f14, $f15 parameter registers zijn voor functies. 
\n # http://msdn.microsoft.com/en-us/library/ms253512%28v=vs.90%29.aspx\n #it is not clear which one will be used.\n self.need = [ins.args[0],Register(\"$4\"),Register(\"$5\"),Register(\"$6\"),Register(\"$7\"),Register(\"$fp\"), Register(\"$sp\"),Register(\"$f12\"),Register(\"$f13\"),Register(\"$f14\"),Register(\"$f15\")]\n self.gen = [Register(\"$2\"),Register(\"$3\"),Register(\"$f0\")] #Return values\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'beq':\n if len(ins.args) == 3:\n self.need = [ins.args[0],ins.args[1]]\n self.label = Label(ins.args[2])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'bne':\n if len(ins.args) == 3:\n self.need = [ins.args[0],ins.args[1]]\n self.label = Label(ins.args[2])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'blez':\n if len(ins.args) == 2:\n self.need = [ins.args[0]]\n self.label = Label(ins.args[1])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'bgtz':\n if len(ins.args) == 2:\n self.need = [ins.args[0]]\n self.label = Label(ins.args[1])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'bltz':\n if len(ins.args) == 2:\n self.need = [ins.args[0]]\n self.label = Label(ins.args[1])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'bgez':\n if len(ins.args) == 2:\n self.need = [ins.args[0]]\n self.label = Label(ins.args[1])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'bct':\n if len(ins.args) == 1:\n self.need = [Register(\"$fcc\")]\n self.label = Label(ins.args[0])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'bcf':\n if len(ins.args) == 1:\n self.need = [Register(\"$fcc\")]\n self.label = Label(ins.args[0])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'bc1f':\n if len(ins.args) == 1:\n self.need = [Register(\"$fcc\")]\n self.label = Label(ins.args[0])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'bc1t':\n if len(ins.args) == 1:\n self.need = [Register(\"$fcc\")]\n self.label = Label(ins.args[0])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)", "def nodeSeparate(self,compInfo, ifSub, subname, subcktName,numNodesSub):\n node = []\n nodeTemp = []\n nodeDic = {}\n pinInit = 'Modelica.Electrical.Analog.Interfaces.Pin '\n pinProtectedInit = 'Modelica.Electrical.Analog.Interfaces.Pin '\n protectedNode = []\n print \"CompInfo coming to nodeSeparate function: compInfo\",compInfo\n \n #Removing '[' and ']' from compInfo for Digital node\n for i in range(0,len(compInfo),1):\n compInfo[i] = compInfo[i].replace(\"[\",\"\").replace(\"]\",\"\")\n \n \n for eachline in compInfo:\n words = eachline.split()\n if eachline[0] in ['m', 'e', 'g', 't','M','E','G','T']:\n nodeTemp.append(words[1])\n nodeTemp.append(words[2])\n nodeTemp.append(words[3])\n nodeTemp.append(words[4])\n elif eachline[0] in ['q', 'j','J','Q']:\n nodeTemp.append(words[1])\n nodeTemp.append(words[2])\n nodeTemp.append(words[3])\n elif eachline[0]=='x' or eachline[0]=='X':\n templine = eachline.split()\n for i in range(0,len(templine),1):\n if templine[i] in subcktName:\n point = i \n nodeTemp.extend(words[1:point])\n else:\n nodeTemp.append(words[1])\n 
nodeTemp.append(words[2])\n for i in nodeTemp:\n if i not in node:\n node.append(i)\n \n for i in range(0, len(node),1):\n nodeDic[node[i]] = 'n' + node[i]\n if ifSub == '0':\n if i != len(node)-1:\n pinInit = pinInit + nodeDic[node[i]] + ', '\n else:\n pinInit = pinInit + nodeDic[node[i]]\n else:\n nonprotectedNode = self.getSubInterface(subname, numNodesSub) \n if node[i] in nonprotectedNode:\n continue\n else:\n protectedNode.append(node[i])\n if ifSub == '1':\n if len(nonprotectedNode) > 0:\n for i in range(0, len(nonprotectedNode),1):\n if i != len(nonprotectedNode)-1:\n pinProtectedInit = pinProtectedInit + nodeDic[nonprotectedNode[i]] + ','\n else:\n pinProtectedInit = pinProtectedInit + nodeDic[nonprotectedNode[i]]\n if len(protectedNode) > 0:\n for i in range(0, len(protectedNode),1):\n if i != len(protectedNode)-1: \n pinInit = pinInit + nodeDic[protectedNode[i]] + ','\n else:\n pinInit = pinInit + nodeDic[protectedNode[i]]\n pinInit = pinInit + ';'\n pinProtectedInit = pinProtectedInit + ';'\n print \"Node---->\",node\n print \"nodeDic----->\",nodeDic\n print \"PinInit----->\",pinInit\n print \"pinProtectedinit--->\",pinProtectedInit\n return node, nodeDic, pinInit, pinProtectedInit", "def _build_sub(self) -> str:\n return dedent(\n \"\"\"\n @SP\n M=M-1\n A=M\n D=M\n @SP\n M=M-1\n A=M\n M=M-D\n @SP\n M=M+1\n \"\"\"\n )", "def __init__(self,args):\n self.reg_ar = args[0]\n self.labtest_ar = args[1]\n self.vital_sign_ar = args[2]\n #self.lab_comb_ar = args[3]", "def __init__(self, encut, name=\"relax_settings\"):\n\n ionic = {\"EDIFF\": 1E-17, \"NSW\": 20, \"IBRION\": 2,\"ISIF\": 2, \"ISYM\": -1, \"NBLOCK\": 1, \"KBLOCK\": 20}\n InputParameters.__init__(self, ionic_settings=ionic, name=name)\n self.update_electronic_sttings(\"ENCUT\", encut)", "def _make_circuit_instructions(n_qubits, depth, type_circuit):\n\n if type_circuit in [0, 1, 2]:\n\n # if type_circuit == 1:\n # if depth > 8:\n # raise ValueError(\n # \"For type-1 circuits, only at most depth=8 allowed!\"\n # )\n\n # define rotations for circuit in each layer, 0: identity, 1:X, 2:Y 3:Z\n ini_pauli = np.zeros([depth, n_qubits], dtype=int)\n\n # set first and second layer, rest comes later\n ini_pauli[0, :] = 2 # y rotation\n if depth > 1:\n ini_pauli[1, :] = 3 # z rotation\n\n # construct natural parameterized circuit\n # gives which type of entangling gates at each layer -- first entry is\n # first qubit index, second is second qubit index, third entry is type\n # of entangling gate\n entangling_gate_index_list = [[] for i in range(depth)]\n orderList = []\n for i in range(n_qubits//2):\n if i % 2 == 0:\n orderList.append(i//2)\n else:\n orderList.append((n_qubits-i)//2)\n\n if n_qubits > 1:\n shiftList = [orderList[0]]\n else:\n shiftList = []\n for i in range(1, n_qubits//2):\n shiftList.append(orderList[i])\n shiftList += shiftList[:-1]\n\n # this list gives which entangling gates are applied in which layer\n if type_circuit == 0:\n # deep natural PQC, includes non-nearest neighbor gates\n for j in range(min(len(shiftList), int(np.ceil(depth/2))-1)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif type_circuit == 1:\n # only do 2 entangling layers at max, and only do gates with\n # nearest neighbor and no ring\n for j in range(min(len(shiftList), 3)):\n if j == 0:\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif (j == 1 or j == 2):\n # exclude ring gate and gate 
0,1 on third entangling layer\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(1, n_qubits//2)\n ]\n\n elif type_circuit == 2:\n # only do 3 regular entangling layers in a ring topology, then two\n # more phase gates with next-nearst neighbor, which requires one\n # swap. This adds 4 more parameters\n for j in range(min(len(shiftList), 3)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n entangling_gate_index_list[1+2*3] = [\n [n_qubits-1, 1, 0],\n [0, n_qubits-2, 0]\n ]\n\n for i in range(len(entangling_gate_index_list)-1):\n if len(entangling_gate_index_list[i]) > 0:\n for j in range(len(entangling_gate_index_list[i])):\n qubit_index = entangling_gate_index_list[i][j][0]\n ini_pauli[i+1, qubit_index] = 2\n if i+2 < depth:\n ini_pauli[i+2, qubit_index] = 3\n\n elif type_circuit == 3:\n\n ini_pauli = np.ones([depth, n_qubits], dtype=int)*2\n\n for i in range(1, depth, 2):\n ini_pauli[i, :] = 3\n\n if n_qubits % 2 == 0:\n # even qubits ALT circuit needs to get rid of boundary rotations at\n # even entangling layers\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n ini_pauli[i+1, -1] = 0\n else:\n # for odd qubits, get rid of boundary either on top or bottom qubit\n for i in range(2, depth, 4):\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, -1] = 0\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n\n # CNOT entangling gates\n entangling_gate_index_list = [[] for i in range(depth)]\n counter = 0\n # third index indicates type of entangling gate\n for k in range(1, depth-1, 2):\n\n # place entangler every second layer, do not place any at last\n if counter % 2 == 0:\n # even layer\n entangling_gate_index_list[k] = [\n [2*j, 2*j+1, 1] for j in range(n_qubits//2)\n ]\n else:\n # odd layer\n entangling_gate_index_list[k] = [\n [2*j+1, 2*j+2, 1] for j in range((n_qubits-1)//2)\n ]\n counter += 1\n\n else:\n raise ValueError('type_circuit='+f'{type_circuit}'+' not recognised.')\n\n return ini_pauli, entangling_gate_index_list" ]
[ "0.62633604", "0.5877768", "0.57064974", "0.5682109", "0.56702554", "0.5581789", "0.55173093", "0.54036635", "0.53858906", "0.5371969", "0.534201", "0.5316278", "0.52800095", "0.5277655", "0.5275859", "0.5246889", "0.5243808", "0.5242359", "0.5229417", "0.5203361", "0.5200167", "0.51831055", "0.5162933", "0.51602256", "0.5151136", "0.51181626", "0.5083447", "0.5083441", "0.5082825", "0.5075135" ]
0.6259067
1
Separate the node numbers and create nodes in the Modelica file; the nodes in the subckt line should not be inside the protected keyword. pinInit is the one that goes under the protected keyword.
def nodeSeparate(self,compInfo, ifSub, subname, subcktName,numNodesSub): node = [] nodeTemp = [] nodeDic = {} pinInit = 'Modelica.Electrical.Analog.Interfaces.Pin ' pinProtectedInit = 'Modelica.Electrical.Analog.Interfaces.Pin ' protectedNode = [] print "CompInfo coming to nodeSeparate function: compInfo",compInfo #Removing '[' and ']' from compInfo for Digital node for i in range(0,len(compInfo),1): compInfo[i] = compInfo[i].replace("[","").replace("]","") for eachline in compInfo: words = eachline.split() if eachline[0] in ['m', 'e', 'g', 't','M','E','G','T']: nodeTemp.append(words[1]) nodeTemp.append(words[2]) nodeTemp.append(words[3]) nodeTemp.append(words[4]) elif eachline[0] in ['q', 'j','J','Q']: nodeTemp.append(words[1]) nodeTemp.append(words[2]) nodeTemp.append(words[3]) elif eachline[0]=='x' or eachline[0]=='X': templine = eachline.split() for i in range(0,len(templine),1): if templine[i] in subcktName: point = i nodeTemp.extend(words[1:point]) else: nodeTemp.append(words[1]) nodeTemp.append(words[2]) for i in nodeTemp: if i not in node: node.append(i) for i in range(0, len(node),1): nodeDic[node[i]] = 'n' + node[i] if ifSub == '0': if i != len(node)-1: pinInit = pinInit + nodeDic[node[i]] + ', ' else: pinInit = pinInit + nodeDic[node[i]] else: nonprotectedNode = self.getSubInterface(subname, numNodesSub) if node[i] in nonprotectedNode: continue else: protectedNode.append(node[i]) if ifSub == '1': if len(nonprotectedNode) > 0: for i in range(0, len(nonprotectedNode),1): if i != len(nonprotectedNode)-1: pinProtectedInit = pinProtectedInit + nodeDic[nonprotectedNode[i]] + ',' else: pinProtectedInit = pinProtectedInit + nodeDic[nonprotectedNode[i]] if len(protectedNode) > 0: for i in range(0, len(protectedNode),1): if i != len(protectedNode)-1: pinInit = pinInit + nodeDic[protectedNode[i]] + ',' else: pinInit = pinInit + nodeDic[protectedNode[i]] pinInit = pinInit + ';' pinProtectedInit = pinProtectedInit + ';' print "Node---->",node print "nodeDic----->",nodeDic print "PinInit----->",pinInit print "pinProtectedinit--->",pinProtectedInit return node, nodeDic, pinInit, pinProtectedInit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_nodes(self):", "def TreeInit(tree):\n \"\"\" Settings/NI_6133 \"\"\"\n tree.addNode('.SETTINGS')\n tree.addNode('.SETTINGS.EXPERIMENT')\n tree.addNode('.SETTINGS.NI')\n tree.addNode('.SETTINGS.NI.NI_6602_TIME')\n tree.addNode('.SETTINGS.NI.NI_6133')\n tree.addNode('.NI_6133')\n tree.addNode('.NI_FPGA')\n tree.addNode('.SETTINGS.NI.NI_6133_DIO')\n tree.addNode('.TEK_2024B')\n tree.addNode('.TEK_2024B.TEK')\n tree.addNode('.TEK_2024B.TEK1')\n tree.addNode('.PIMAX3')\n tree.addNode('.PIMAX3.RAW')\n tree.addNode('.PIMAX3.CAM_SETTING')\n \"\"\" Single-valued member nodes \"\"\"\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_DATE','TEXT',\n 'SHOTDATEANDTIME')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_NOTES','TEXT','SHOTNOTES')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SYS_MESSAGE','TEXT','SYSMESSAGE')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_QUALITY','TEXT',\n 'SHOTQUALITY')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_NUMBER','TEXT',\n 'SHOTNUMBER')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:PROG_VERSION','TEXT',\n 'PROGRAM_VERSION')\n AddNodeWithTag(tree, '.TEK_2024B.TEK:RAW', 'TEXT', 'RAWTEKSCOPE')\n AddNodeWithTag(tree, '.TEK_2024B.TEK1:RAW', 'TEXT', 'RAWTEKSCOPE1')", "def modTree(\n tree\n ):\n\n # create file name \n filename=tree+\".internodeLabels.tree\"\n\n # internode label starter\n label=1\n\n # read in tree and create tags\n tree = Phylo.read(tree, 'nexus')\n # loop through internal nodes\n for i in tree.get_nonterminals():\n # create temp array to hold 'addtag, nodexyz, and children of node names'\n temp=[]\n temp.append(\"addtag\")\n nodeID=\"node\"+str(label)\n temp.append(nodeID)\n # for each internal node, get the children tips in the tree and append them to the temp list\n for ii in i.get_terminals():\n temp.append(ii.name)\n # prints lines for bayesTraits\n print(*temp, sep=' ')\n print(\"addMRCA\", nodeID, nodeID, sep=' ')\n # replace the confidence value with nodeID in the phylogeny.\n # This is for the additional newick tree that gets written out\n i.confidence=nodeID\n # add one for the next internode label\n label+=1\n #Phylo.write(tree, filename, 'nexus')", "def initnodes(self):\n newnodes = self.config[\"nodes\"]\n newpynodes = self.config[\"pynodes\"]\n logging.info('Loading initial nodes: {}'.format(newnodes))\n logging.info('Loading initial python nodes: {}'.format(newpynodes))\n for node in newnodes:\n self.runnode(node)\n for node in newpynodes:\n self.runnode(node, True)", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def initial_nodes_setup(config):\n # Nodes setup\n nodes = []\n path = config['urdf_data_path'] + 'nodes.csv'\n position_data = 
genfromtxt(path, delimiter=',')\n for i in range(config['simulation']['n_nodes']):\n info = {}\n info['position'] = [\n position_data[i][1] * 1.125, position_data[i][0] / 1.125\n ]\n info['importance'] = 0\n nodes.append(info)\n return nodes", "def __init__(self, sid, pressure, nodes, comment=''):\n if comment:\n self.comment = comment\n self.sid = sid\n self.pressure = pressure\n self.nodes = nodes\n assert len(self.nodes) in [3, 4], 'nodes=%s' % self.nodes", "def __init__(self):\n self.open = True # estado open significa que se pueden agregar nodos, false que no (estado operativo)\n self.num = 0\n self.num_fr = 0\n self.num_in = 0\n self.x0 = [] # coordenadas iniciales de los nodos\n self.x = [] # coordenadas actuales\n self.tipos = []\n self.mask_fr = []\n self.mask_in = []", "def makeTree(node,baseName,baseAddress,nodes,parentNode,vars,isGenerated):\n \n if (isGenerated == None or isGenerated == False) and node.get('generate') is not None and node.get('generate') == 'true':\n generateSize = parseInt(node.get('generate_size'))\n generateAddressStep = parseInt(node.get('generate_address_step'))\n generateIdxVar = node.get('generate_idx_var')\n for i in range(0, generateSize):\n vars[generateIdxVar] = i\n makeTree(node, baseName, baseAddress + generateAddressStep * i, nodes, parentNode, vars, True)\n return\n newNode = Node()\n name = baseName\n if baseName != '': name += '.'\n if node.get('id') is not None:\n name += node.get('id')\n name = substituteVars(name, vars)\n newNode.name = name\n if node.get('description') is not None:\n newNode.description = node.get('description')\n address = baseAddress\n if node.get('address') is not None:\n address = baseAddress + parseInt(node.get('address'))\n newNode.address = address\n newNode.real_address = (address<<2)+0x64000000\n newNode.permission = node.get('permission')\n newNode.mask = parseInt(node.get('mask'))\n newNode.isModule = node.get('fw_is_module') is not None and node.get('fw_is_module') == 'true'\n if node.get('sw_monitor_warn_min_threshold') is not None:\n newNode.warn_min_value = node.get('sw_monitor_warn_min_threshold') \n if node.get('sw_monitor_error_min_threshold') is not None:\n newNode.error_min_value = node.get('sw_monitor_error_min_threshold') \n nodes[name] = newNode\n if parentNode is not None:\n parentNode.addChild(newNode)\n newNode.parent = parentNode\n newNode.level = parentNode.level+1\n for child in node:\n makeTree(child,name,address,nodes,newNode,vars,False)", "def __init__(self,id,eds_file):\n super(I85, self).__init__()\n self.id = id\n self.eds_file = eds_file\n self.node = self.network.add_node(self.id, self.eds_file)", "def setUp(self):\n\n serial_times = {295: '1971-07-31T01:24:11.754',\n 296: '1971-07-31T01:24:36.970',\n 297: '1971-07-31T01:25:02.243',\n 298: '1971-07-31T01:25:27.457',\n 299: '1971-07-31T01:25:52.669',\n 300: '1971-07-31T01:26:17.923'}\n self.serials = ['APOLLO15/METRIC/{}'.format(i) for i in serial_times.values()]\n\n\n x = list(range(5))\n y = list(range(5))\n pid = [0,0,1,1,1]\n idx = pid\n serials = [self.serials[0], self.serials[1], self.serials[2],\n self.serials[2], self.serials[3]]\n\n\n columns = ['x', 'y', 'idx', 'pid', 'nid']\n self.data_length = 5\n\n data = [x,y, idx, pid, serials]\n\n self.creation_time = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n cnet = C(data, index=columns).T\n\n io_controlnetwork.to_isis('test.net', cnet, mode='wb', targetname='Moon')\n\n self.header_message_size = 85\n self.point_start_byte = 65621", "def _create_sections(self):\n\t\t# NOTE: cell=self 
is required to tell NEURON of this object.\n\t\tself.node = [h.Section(name='node',cell=self) for x in range(self._axonNodes)]\n\t\tself.mysa = [h.Section(name='mysa',cell=self) for x in range(self._paraNodes1)]\n\t\tself.flut = [h.Section(name='flut',cell=self) for x in range(self._paraNodes2)]\n\t\tself.stin = [h.Section(name='stin',cell=self) for x in range(self._axonInter)]", "def read_zone_nodes(dim, Nmin, Nmax, ifile):\n line = ifile.readline()\n readline = False\n if re.search(re_parant, line): # check for initial paranthesis\n readline = True\n #dummy = lines.pop(0)\n global nodes \n nodes = zeros((dim, Nmax - Nmin + 1))\n for i in range(Nmin, Nmax + 1):\n if readline:\n line = ifile.readline()\n readline = True\n nodes[:, i - Nmin] = [eval(x) for x in line.split()]", "def create_modules(self):\n self.bitcell = self.replica_bitcell = self.mod_replica_bitcell()\n self.add_mod(self.bitcell)\n\n # This is the replica bitline load column that is the height of our array\n self.rbl = bitcell_array(name=\"bitline_load\", cols=1, rows=self.bitcell_loads)\n self.add_mod(self.rbl)\n\n # FIXME: The FO and depth of this should be tuned\n self.delay_chain = self.mod_delay_chain([self.delay_fanout]*self.delay_stages)\n self.add_mod(self.delay_chain)\n\n self.inv = pinv()\n self.add_mod(self.inv)\n\n self.access_tx = ptx(tx_type=\"pmos\")\n self.add_mod(self.access_tx)", "def init(node_tree):\n\n start_pos_x = 0\n start_pos_y = 0\n\n pos_x_shift = 185\n\n # init parent\n DifSpec.init(node_tree, disable_remap_alpha=True)\n\n base_tex_n = node_tree.nodes[DifSpec.BASE_TEX_NODE]\n spec_mult_n = node_tree.nodes[DifSpec.SPEC_MULT_NODE]\n vcol_mult_n = node_tree.nodes[DifSpec.VCOLOR_MULT_NODE]\n\n # move existing\n for node in node_tree.nodes:\n if node.location.x > start_pos_x + pos_x_shift:\n node.location.x += pos_x_shift * 2\n\n # node creation\n sec_geom_n = node_tree.nodes.new(\"ShaderNodeGeometry\")\n sec_geom_n.name = sec_geom_n.label = DifSpecOclu.SEC_GEOM_NODE\n sec_geom_n.location = (start_pos_x - pos_x_shift, start_pos_y + 1200)\n sec_geom_n.uv_layer = _MESH_consts.none_uv\n\n oclu_tex_n = node_tree.nodes.new(\"ShaderNodeTexture\")\n oclu_tex_n.name = oclu_tex_n.label = DifSpecOclu.OCLU_TEX_NODE\n oclu_tex_n.location = (start_pos_x + pos_x_shift, start_pos_y + 1200)\n\n oclu_sep_rgb_n = node_tree.nodes.new(\"ShaderNodeSeparateRGB\")\n oclu_sep_rgb_n.name = oclu_sep_rgb_n.label = DifSpecOclu.OCLU_SEPARATE_RGB_NODE\n oclu_sep_rgb_n.location = (start_pos_x + pos_x_shift * 3, start_pos_y + 1200)\n\n oclu_mix_n = node_tree.nodes.new(\"ShaderNodeMixRGB\")\n oclu_mix_n.name = oclu_mix_n.label = DifSpecOclu.OCLU_MIX_NODE\n oclu_mix_n.location = (start_pos_x + pos_x_shift * 4, start_pos_y + 1400)\n oclu_mix_n.blend_type = \"MULTIPLY\"\n oclu_mix_n.inputs['Fac'].default_value = 1\n\n oclu_a_mix_n = node_tree.nodes.new(\"ShaderNodeMath\")\n oclu_a_mix_n.name = oclu_a_mix_n.label = DifSpecOclu.OCLU_A_MIX_NODE\n oclu_a_mix_n.location = (start_pos_x + pos_x_shift * 4, start_pos_y + 1600)\n oclu_a_mix_n.operation = \"MULTIPLY\"\n\n # links creation\n node_tree.links.new(oclu_tex_n.inputs[\"Vector\"], sec_geom_n.outputs[\"UV\"])\n\n # pass 1\n node_tree.links.new(oclu_sep_rgb_n.inputs[\"Image\"], oclu_tex_n.outputs[\"Color\"])\n\n # pass 2\n node_tree.links.new(oclu_a_mix_n.inputs[0], base_tex_n.outputs[\"Value\"])\n node_tree.links.new(oclu_a_mix_n.inputs[1], oclu_sep_rgb_n.outputs[\"R\"])\n\n node_tree.links.new(oclu_mix_n.inputs[\"Color1\"], base_tex_n.outputs[\"Color\"])\n 
node_tree.links.new(oclu_mix_n.inputs[\"Color2\"], oclu_sep_rgb_n.outputs[\"R\"])\n\n # pass 3\n node_tree.links.new(spec_mult_n.inputs[\"Color2\"], oclu_a_mix_n.outputs[\"Value\"])\n\n # pass 4\n node_tree.links.new(vcol_mult_n.inputs[\"Color2\"], oclu_mix_n.outputs[\"Color\"])", "def constructNodes(self, name, offset, seq):\n self.paralogs.append([name, offset])\n self.sizes[name] = len(seq)\n for i in xrange(len(seq) - self.kmer_size + 1):\n kmer = strandless(seq[i:i + self.kmer_size].upper())\n if \"N\" in kmer:\n continue\n self.kmers.add(kmer)\n l = kmer + \"_L\"\n r = kmer + \"_R\"\n # should not be possible to have just left or just right\n if self.G.has_node(l) is not True and self.G.has_node(r) is not True:\n assert not(self.G.has_node(l) or self.G.has_node(r))\n self.G.add_node(l)\n self.G.add_node(r)\n self.G.add_edge(l, r, count=1, positions=defaultdict(list))\n self.G.edge[l][r]['positions'][name].append(i)\n else:\n self.G.edge[l][r]['count'] += 1\n self.G.edge[l][r]['positions'][name].append(i)\n assert len(self.G) % 2 == 0", "def __init__(self, nodes, ips, auth_pass, base_rid):\n self.total_nodes = nodes\n self.ips_p_node = ips\n self.mapping = []\n self.auth_pass = auth_pass\n self.base_rid = base_rid", "def changeNodeLib(ned, createNodeWin):\n pass", "def _build_topology(self):\n\t\t# childSection.connect(parentSection, [parentX], [childEnd])\n\t\tfor i in range(self._axonNodes-1):\n\t\t\tself.node[i].connect(self.mysa[2*i],0,1)\n\t\t\tself.mysa[2*i].connect(self.flut[2*i],0,1)\n\t\t\tself.flut[2*i].connect(self.stin[6*i],0,1)\n\t\t\tself.stin[6*i].connect(self.stin[6*i+1],0,1)\n\t\t\tself.stin[6*i+1].connect(self.stin[6*i+2],0,1)\n\t\t\tself.stin[6*i+2].connect(self.stin[6*i+3],0,1)\n\t\t\tself.stin[6*i+3].connect(self.stin[6*i+4],0,1)\n\t\t\tself.stin[6*i+4].connect(self.stin[6*i+5],0,1)\n\t\t\tself.stin[6*i+5].connect(self.flut[2*i+1],0,1)\n\t\t\tself.flut[2*i+1].connect(self.mysa[2*i+1],0,1)\n\t\t\tself.mysa[2*i+1].connect(self.node[i+1],0,1)", "def writeNodes(fil, nodes, nofs=1):\n fil.write(' NODAL COORDINATES 2.2.30\\n')\n for i,n in enumerate(nodes):\n fil.write(\"%10d%20.11e%20.11e%20.11e\\n\" % ((i+nofs,)+tuple(n)))\n fil.write('ENDOFSECTION\\n')", "def __init__(self, nodes=None):\r\n self.nodes = nodes", "def _read_skeleton(self, lines, line_index=0, n_lines=-1):\n line_index = line_index\n parents = []\n level = 0\n name = None\n if n_lines == -1:\n n_lines = len(lines)\n\n while line_index < n_lines:\n if lines[line_index].startswith(\"MOTION\"):\n break\n\n else:\n if \"{\" in lines[line_index]:\n parents.append(name)\n level += 1\n\n if \"}\" in lines[line_index]:\n level -= 1\n parents.pop(-1)\n if level == 0:\n break\n\n line_split = lines[line_index].strip().split()\n\n if line_split:\n\n if line_split[0] == \"ROOT\":\n name = line_split[1]\n self.root = name\n self.node_names[name] = {\n \"children\": [], \"level\": level, \"channels\": [], \"channel_indices\": []}\n\n elif line_split[0] == \"JOINT\":\n name = line_split[1]\n self.node_names[name] = {\n \"children\": [], \"level\": level, \"channels\": [], \"channel_indices\": []}\n self.node_names[parents[-1]][\"children\"].append(name)\n\n elif line_split[0] == \"CHANNELS\":\n for channel in line_split[2:]:\n self.node_channels.append((name, channel))\n self.node_names[name][\"channels\"].append(channel)\n self.node_names[name][\"channel_indices\"].append(len(self.node_channels) - 1)\n\n elif line_split == [\"End\", \"Site\"]:\n name += \"_\" + \"\".join(line_split)\n self.node_names[name] = 
{\"level\": level}\n # also the end sites need to be adde as children\n self.node_names[parents[-1]][\"children\"].append(name)\n\n elif line_split[0] == \"OFFSET\" and name in list(self.node_names.keys()):\n self.node_names[name][\"offset\"] = list(map(float, line_split[1:]))\n line_index += 1\n return line_index", "def __init__(__self__, *,\n nodes: pulumi.Input[Sequence[pulumi.Input[str]]]):\n pulumi.set(__self__, \"nodes\", nodes)", "def __init__(self, total_nodes_to_create):\n Topo.__init__(self)\n\n # Directory where this file / script is located\"\n selfPath = os.path.dirname(os.path.abspath(\n inspect.getfile(inspect.currentframe()))) # script directory\n\n # Initialize a service helper for Quagga with default options\n self.quaggaSvc = QuaggaService(autoStop=False)\n\n # Path configurations for mounts\n self.quaggaBaseConfigPath = selfPath + '/configs/'\n\n # List of Quagga host configs\n self.base_ip_address = [172, 0, 1, 1]\n self.subnet_mask = 16\n self.loopback_address = '127.0.0.1/24'\n self.host_prefix = 'a'\n self.total_nodes = 0\n\n # Add switch for IXP fabric\n self.ixpfabric = self.addSwitch('fabric-sw1')\n\n for i in range(total_nodes_to_create):\n self.add_node()", "def __init__(self):\n\n self.nodes = {}" ]
[ "0.6431492", "0.58888775", "0.5808518", "0.5718617", "0.568527", "0.568527", "0.568527", "0.568527", "0.568527", "0.568527", "0.5668452", "0.5654593", "0.56267965", "0.5613493", "0.55443066", "0.55038846", "0.54904735", "0.546983", "0.5460912", "0.5418785", "0.54137105", "0.5370444", "0.5366333", "0.53561395", "0.5347294", "0.5344168", "0.533097", "0.5317927", "0.53089696", "0.5306502" ]
0.68805975
0
It is the main function of the Ngspice to Modelica converter module.
def main(args): if len(sys.argv) == 2: filename = sys.argv[1] else: print "USAGE:" print "python NgspicetoModelica.py <filename>" sys.exit() dir_name = os.path.dirname(os.path.realpath(filename)) file_basename = os.path.basename(filename) obj_NgMoConverter = NgMoConverter() #Getting all the require information lines = obj_NgMoConverter.readNetlist(filename) #print "Complete Lines of Ngspice netlist :lines ---------------->",lines optionInfo, schematicInfo = obj_NgMoConverter.separateNetlistInfo(lines) #print "All option details like analysis,subckt,.ic,.model : OptionInfo------------------->",optionInfo #print "Schematic connection info :schematicInfo",schematicInfo modelName, modelInfo, subcktName, paramInfo,transInfo = obj_NgMoConverter.addModel(optionInfo) print "Name of Model : modelName-------------------->",modelName print "Model Information :modelInfo--------------------->",modelInfo #print "Subcircuit Name :subcktName------------------------>",subcktName #print "Parameter Information :paramInfo---------------------->",paramInfo modelicaParamInit = obj_NgMoConverter.processParam(paramInfo) #print "Make modelicaParamInit from paramInfo :processParamInit------------->",modelicaParamInit compInfo, plotInfo = obj_NgMoConverter.separatePlot(schematicInfo) print "Info like run etc : CompInfo----------------->",compInfo #print "Plot info like plot,print etc :plotInfo",plotInfo IfMOS = '0' for eachline in compInfo: words = eachline.split() if eachline[0] == 'm': IfMOS = '1' break subOptionInfo = [] subSchemInfo = [] if len(subcktName) > 0: #subOptionInfo = [] #subSchemInfo = [] for eachsub in subcktName: filename_temp = eachsub + '.sub' data = obj_NgMoConverter.readNetlist(filename_temp) subOptionInfo, subSchemInfo = obj_NgMoConverter.separateNetlistInfo(data) for eachline in subSchemInfo: words = eachline.split() if eachline[0] == 'm': IfMOS = '1' break #print "Subcircuit OptionInfo : subOptionInfo------------------->",subOptionInfo #print "Subcircuit Schematic Info :subSchemInfo-------------------->",subSchemInfo node, nodeDic, pinInit, pinProtectedInit = obj_NgMoConverter.nodeSeparate(compInfo, '0', [], subcktName,[]) print "All nodes in the netlist :node---------------->",node print "NodeDic which will be used for modelica : nodeDic------------->",nodeDic #print "PinInit-------------->",pinInit #print "pinProtectedInit----------->",pinProtectedInit modelicaCompInit, numNodesSub = obj_NgMoConverter.compInit(compInfo,node, modelInfo, subcktName,dir_name,transInfo) print "ModelicaComponents : modelicaCompInit----------->",modelicaCompInit print "SubcktNumNodes : numNodesSub---------------->",numNodesSub connInfo = obj_NgMoConverter.connectInfo(compInfo, node, nodeDic, numNodesSub,subcktName) #print "ConnInfo------------------>",connInfo ###After Sub Ckt Func if len(subcktName) > 0: data, subOptionInfo, subSchemInfo, subModel, subModelInfo, subsubName,subParamInfo, modelicaSubCompInit, modelicaSubParam,\ nodeSubInterface,nodeSub, nodeDicSub, pinInitSub, connSubInfo = obj_NgMoConverter.procesSubckt(subcktName,numNodesSub,dir_name) #Adding 'numNodesSub' by Fahim #Creating Final Output file newfile = filename.split('.') newfilename = newfile[0] outfile = newfilename + ".mo" out = open(outfile,"w") out.writelines('model ' + os.path.basename(newfilename)) out.writelines('\n') if IfMOS == '0': out.writelines('import Modelica.Electrical.*;') elif IfMOS == '1': out.writelines('import BondLib.Electrical.*;') #out.writelines('import Modelica.Electrical.*;') out.writelines('\n') for eachline in 
modelicaParamInit: if len(paramInfo) == 0: continue else: out.writelines(eachline) out.writelines('\n') for eachline in modelicaCompInit: if len(compInfo) == 0: continue else: out.writelines(eachline) out.writelines('\n') out.writelines('protected') out.writelines('\n') out.writelines(pinInit) out.writelines('\n') out.writelines('equation') out.writelines('\n') for eachline in connInfo: if len(connInfo) == 0: continue else: out.writelines(eachline) out.writelines('\n') out.writelines('end '+ os.path.basename(newfilename) + ';') out.writelines('\n') out.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n # Load the data and scale\n x_train = np.load(\"../data/audio/ESC-10/esc10_raw_train_audio.npy\")[:,:,0]\n y_train = np.load(\"../data/audio/ESC-10/esc10_raw_train_labels.npy\")\n x_test = np.load(\"../data/audio/ESC-10/esc10_raw_test_audio.npy\")[:,:,0]\n y_test = np.load(\"../data/audio/ESC-10/esc10_raw_test_labels.npy\")\n\n x_train = (x_train.astype('float32') + 32768) / 65536\n x_test = (x_test.astype('float32') + 32768) / 65536\n\n # Train and test the models\n train(x_train, y_train, x_test, y_test)", "def convert():\n \n cvt_map = {\n '.add(' : '.add_subsystem(',\n '.add_param(' : '.add_input(',\n '.params': '._inputs',\n '.unknowns': '._outputs',\n '.resids': '._residuals',\n 'openmdao.test.util': 'openmdao.devtools.testutil',\n 'def solve_nonlinear(self, params, unknowns, resids)': 'def compute(params, unknowns)',\n }\n\n with open(sys.argv[1], 'r') as f:\n contents = f.read()\n for old, new in cvt_map.items():\n contents = contents.replace(old, new)\n\n sys.stdout.write(contents)", "def main(self):\r\n\r\n #Train the GEN and DISC\r\n self.modelTrain.main()\r\n self.disp.show()", "def main():\n module = IRODSPermissionModule()\n module.run()", "def main():\n \n # Load the notes used to train the model\n notes = pickle.load(open('data/notes', 'rb'))\n \n # Load the notes from all video games combined\n all_notes = pickle.load(open('data/all_notes', 'rb'))\n \n # Get number of unique notes, rests, and chords in the midi files\n n_vocab = len(set(all_notes))\n\n # Generate Network Inputs (list of lists containing note sequences)\n # Generate Normalized Network Input\n network_input, normalized_input = prepare_sequences(notes, all_notes, n_vocab)\n \n # Generate the Keras model with final dense layer having n_vocab number of nodes\n model = create_network(normalized_input, n_vocab)\n \n # Generate the note outputs from the model, and random sequence of notes for network input\n prediction_output = generate_notes(model, network_input, all_notes, n_vocab)\n \n # Create the Midi file from the generated note output\n create_midi(prediction_output)", "def main(Args):\n norm = [1.9844158727667542, 413.83759806375525,\n 51.2789974336363, 1038.4760551905683]\n input_pull = False\n input_model_mapping = False\n max_number = 2\n count = 40000\n catalog_name = os.path.join(DATA_PATH, 'OneDegSq.fits')\n # Define parameters for mrcnn model with btk here\n resid_model = btk_utils.Resid_btk_model(\n Args.model_name, Args.model_path, MODEL_DIR, training=True,\n images_per_gpu=4, validation_for_training=True)\n # Load parameters for dataset and load model\n resid_model.config.WEIGHT_DECAY = 0.001\n resid_model.config.STEPS_PER_EPOCH = 1000\n resid_model.config.VALIDATION_STEPS = 20\n sampling_function = None\n layers = 'all'\n if Args.model_name == 'model1':\n resid_model.config.BACKBONE = 'resnet41'\n elif Args.model_name == 'model2':\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model3':\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n 
elif Args.model_name == 'model5':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet35'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4_large':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = '4+' # '3+'\n elif Args.model_name == 'model6':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 51.2789974336363, 1038.4760551905683]\n input_pull = True\n elif Args.model_name == 'model7':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model8': # stretch = 0.1, Q = 3\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model9': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again3': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model12': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n elif Args.model_name == 'model12_again': # stretch = 2000, Q = 0.5 # larger learning rate\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 10 # changed from 6 to 10 for run 4\n elif Args.model_name == 'model12_again2': # stretch = 2000, Q = 0.5 # larger learning rate val set reduced to 10\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n resid_model.config.VALIDATION_STEPS = 10\n else:\n raise AttributeError(\"model not found\", Args.model_name)\n print(\"Train in model:\", Args.model_name)\n resid_model.config.display()\n resid_model.make_resid_model(catalog_name, count=count,\n max_number=max_number, augmentation=True,\n norm_val=norm, input_pull=input_pull,\n sampling_function=sampling_function,\n input_model_mapping=input_model_mapping)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs,\n layers=layers)\n name = Args.model_name + '_run2'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs + 10)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs+10,\n layers=layers)\n name = Args.model_name + '_run3'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)", "def main():\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n prog=\"onnx\",\n description=\"A Command Line Interface for interacting with ONNX models\",\n epilog=\"test\\n\")\n\n parser.add_argument(\"-v\", \"--version\", action=\"store_true\",\n help=\"Print version information and quit\")\n\n # Subcommands\n subparsers = parser.add_subparsers(dest=\"subcommand\")\n\n convert_parser = subparsers.add_parser(\"convert\",\n help=\"Convert a model from an external format to the ONNX format\")\n convert_parser.add_argument(\"-f\", \"--framework\", type=str,\n choices=convert.framework_lkp.keys(),\n help=\"The source model framework\")\n convert_parser.add_argument(\"path\", type=str,\n help=\"The path to the source model\")\n\n args = parser.parse_args()\n if args.version:\n print(__version__)\n return 0\n\n try:\n cmd = cmd_lkp[args.subcommand]\n except KeyError:\n print(\"Subcommand required\")\n return 1\n\n cmd(args)", "def main(model,pmap):\n\n addPppParams(model)\n\n# addTransportParams(model,pmap)\n\n #translationSources(model)\n\n #addLipidMetabs(model)\n\n return", "def main():\n\n gpu_id = 1\n d_batch = 64\n d_embed = 256\n d_hidden = 256\n d_image_size = 256\n device = torch.device(f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu')\n dataset, train_loader = get_default_flickr30k_loader(d_batch=d_batch, d_image_size=d_image_size)\n model = Img2Txt(dataset.d_vocab, d_embed, d_hidden, dataset.start_token, dataset.end_token).to(device)\n\n train(model, dataset, train_loader, device)", "def main():\n\n #\n # Generate waveform\n #\n\n print 'generating waveoform...'\n waveform = pmns_utils.Waveform('shen_135135_lessvisc')\n\n # Pick some extrinsic parameters\n ext_params = ExtParams(distance=1, 
ra=0.0, dec=0.0, polarization=0.0,\n inclination=0.0, phase=0.0, geocent_peak_time=0.0+5.0)\n\n # Construct the time series for these params\n waveform.make_wf_timeseries(theta=ext_params.inclination,\n phi=ext_params.phase)\n\n #\n # Generate IFO data\n #\n det1_data = DetData(waveform=waveform, ext_params=ext_params)\n\n from scipy import signal\n import pylab as pl\n\n pl.figure()\n pl.plot(det1_data.td_response.sample_times,det1_data.td_response.data)\n pl.plot(det1_data.td_signal.sample_times,det1_data.td_signal.data)\n\n pl.figure()\n f,p = signal.welch(det1_data.td_response.data, fs=1./det1_data.delta_t,\n nperseg=512)\n pl.loglog(f,np.sqrt(p))\n\n f,p = signal.welch(det1_data.td_signal.data, fs=1./det1_data.delta_t,\n nperseg=512)\n pl.loglog(f,np.sqrt(p))\n pl.ylim(1e-25,1e-21)\n pl.show()", "def convert( core ):\n print \"In the hycom_binary.convert routine\"\n\n args = core.variables\n print \"arg list = \", args\n\n afile = core.data\n # replace the .a with .b and save as 'bfile'\n bfile = str.replace( core.data, \".a\", \".b\" )\n with open( bfile, \"r\" ) as b:\n with open( afile, \"rb\" ) as a:\n lines = b.readlines()\n # This assumes idm and jdm are always\n # on lines 7 and 8 respectively\n idm = int(lines[7].split()[0])\n jdm = int(lines[8].split()[0])\n # read the last k value\n kdm = int(lines[-1].split()[4])\n # number of bytes in array\n n = idm*jdm\n numbytes = n*4 \n # each rec is padded to a multiple of 4096\n pad = (numbytes/4096+1)*4096-numbytes \n print \"numbytes = \", numbytes\n print \"pad = \", pad\n end = \">\" # Big endian\n form = \"f\" # unpack as ieee 32 bit floats\n vars = {} # store all the variables in a dictionary of lists\n lastRead = 0\n for lineno,line in enumerate( lines[10:] ):\n line = line.split()\n varName = line[0].strip()\n print \"varName = \", varName\n if varName in args:\n print varName, \"is in args!\"\n print \"lineno = \", lineno\n print \"lastRead = \", lastRead\n a.read( (lineno-lastRead)*(numbytes+pad) ) # skip through unwanted data\n array = core.struct.unpack(end+form*n, a.read( numbytes )) # read data\n a.read( pad )\n lastRead = lineno+1 # save the last line read for skipping future lines\n if varName in vars:\n # Append this array to the list of arrays (this makes it easier to\n # convert all the arrays into a 3 dimensional list later on)\n print varName, \" is in vars!\"\n\n # Do some preliinary error checking\n filtered = filter( lambda x: x<1e30, array ) # remove nans\n if abs(min(filtered)-float(line[6].strip())) > 1e-6:\n sys.exit(\"ERROR: The data's min is not equal to the .b file's min\")\n if abs(max(filtered)-float(line[7].strip())) > 1e-6:\n sys.exit(\"ERROR: The data's max is not equal to the .b file's max\")\n if len(vars[varName])+1 != int( line[4].strip() ):\n sys.exit(\"ERROR: Level of this array is out of sequence. 
Missed a record\")\n \n vars[varName].append( core.np.array( array ) )\n\n else:\n # Else add a new element to the dictionary\n print \"Adding new element \", varName, \" to vars!\"\n vars[varName] = [ core.np.array( array ) ]\n \n print \"vars.keys()[1] = \", vars.keys()\n print \"len( vars.values()[1] ) = \", len(vars.values()[1])\n\n # Convert to vtk now \n # Make mesh...\n \n nX = idm\n nY = jdm\n nZ = kdm \n conn = []\n pts = []\n rad = []\n cntr = 0\n for k in range(nZ):\n for j in range(nY):\n for i in range(nX):\n #print \"pt%d = (%d,%d,%d)\" % (cntr,i,j,k)\n cntr += 1\n pts.extend([ i, j, k ])\n rad.append( 0 )\n if k < nZ-1 and j < nY-1 and i < nX-1:\n pt1 = k*(nX*nY) + j*nX + i;\n pt2 = k*(nX*nY) + j*nX + i+1;\n pt3 = k*(nX*nY) + (j+1)*nX + i+1;\n pt4 = k*(nX*nY) + (j+1)*nX + i;\n pt5 = (k+1)*(nX*nY) + j*nX + i;\n pt6 = (k+1)*(nX*nY) + j*nX + i+1;\n pt7 = (k+1)*(nX*nY) + (j+1)*nX + i+1;\n pt8 = (k+1)*(nX*nY) + (j+1)*nX + i;\n\n conn.append([ \"hexahedron\", pt1, pt2, pt3, pt4, pt5, pt6, pt7, pt8 ])\n \n# variables = []\n# for name,lst in vars.iteritems():\n# fullArray = core.np.ndarray( (kdm,jdm,idm) )\n# print \"fullArray.shape = \", fullArray.shape\n# print \"fullArray[0].shape = \", fullArray[0].shape\n# print \"fullArray[:].shape = \", fullArray[:].shape\n# print \"fullArray[:][0].shape = \", fullArray[:][0].shape\n# print \"fullArray[:][:][0].shape = \", fullArray[:][:][0].shape\n# print \"fullArray.ndim = \", fullArray.ndim\n# print \"fullArray.size = \", fullArray.size\n# for lvl,array in enumerate(lst):\n# print \"Level = \", lvl\n# print \"array.shape = \", array.shape\n# print \"array.ndim = \", array.ndim\n# print \"array.size = \", array.size\n# tmp = array.reshape( jdm,idm )\n# print \"tmp.shape = \", tmp.shape\n# print \"tmp.ndim = \", tmp.ndim\n# print \"tmp.size = \", tmp.size\n# fullArray[lvl] = array.reshape( jdm,idm )\n# variables.append( (name, 1, 1, fullArray.tolist()) )\n\n##\n# for time in xrange( totalTime ):\n# # Data arrays\n# u_wind = dataVars['uwind_stress'][:][time].tolist()\n# v_wind = dataVars['vwind_stress'][:][time].tolist()\n# wind = []\n# for i in xrange( numberOfElements ):\n# wind.append( u_wind[i] )\n# wind.append( v_wind[i] )\n# wind.append( 0.0 )\n#\n# # Create the variables such as vectors (velocity) and scalars (temperature/salinity)\n# vars = [(\"wind\", 3, 0, wind), (\"u_wind\", 1, 0, u_wind ), (\"v_wind\", 1, 0, v_wind)]\n##\n \n variables = core.np.zeros( len(pts) )\n print \"variables.shape = \", variables.shape\n print \"variables.ndim = \", variables.ndim\n print \"variables.size = \", variables.size\n\n print \"len(pts) = \", len(pts)\n print \"len(pts)/3 = \", len(pts)/3\n \n var_datum = [ \"radius\", 1, 1, variables.tolist() ]\n vars = [ var_datum ]\n outfile = core.output + \".vtk\"\n core.vw.WriteUnstructuredMesh(outfile, 0, pts, conn, vars)", "def _main(args):\n # model = keras.models.load_model(args.h5_file, custom_objects={'prelu': prelu})\n with tf.keras.utils.custom_object_scope({'prelu': prelu}):\n converter = tf.lite.TFLiteConverter.from_keras_model_file(args.h5_file)\n tflite_file = converter.convert()\n open(args.tflite_file, 'wb').write(tflite_file)\n print('='*30)\n print('tffile file save in {}.'.format(args.tflite_file))", "def main():\n obj = VplexStorageview()\n obj.perform_module_operation()", "def main(self):", "def main():\n\n obj = PowerStoreNfsExport()\n obj.perform_module_operation()", "def main():\n ModLoader.add_constructor(\"!ec2rlcore.module.Module\", ModLoader.ignoretag)\n\n mod_src_dir = 
os.path.join(os.getcwd(), \"src\")\n try:\n os.stat(mod_src_dir)\n except Exception:\n os.mkdir(mod_src_dir)\n\n try:\n for mod_file_name in os.listdir(os.path.join(root_ec2rl_dir, \"mod.d\")):\n if mod_file_name == \"ex_remediation.yaml\":\n continue\n with open(os.path.join(root_ec2rl_dir, \"mod.d\", mod_file_name), \"r\") as yamlfile:\n module = yaml.load(yamlfile, Loader=ModLoader)\n if module[\"language\"] == \"python\":\n mod_src_path = os.path.join(mod_src_dir, \"{}.py\".format(module[\"name\"]))\n with open(mod_src_path, \"w\") as pyfile:\n pyfile.write(module[\"content\"])\n print(\"Wrote: {}\".format(mod_src_path))\n print(\"Conversion complete.\")\n except Exception as ex:\n print(ex)\n print(\"Conversion failed. Please review the exception to resolve\")", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():" ]
[ "0.64135325", "0.64008504", "0.6285087", "0.61439496", "0.61026406", "0.6097184", "0.60559285", "0.60184", "0.6000427", "0.59560597", "0.59112316", "0.59103507", "0.5902412", "0.58892053", "0.5882462", "0.5876786", "0.5848752", "0.5848752", "0.5848752", "0.5848752", "0.5848752", "0.5848752", "0.5848752", "0.5848752", "0.5848752", "0.5848752", "0.5848752", "0.5848752", "0.5848752", "0.5848752" ]
0.7880283
0
Loads the file found at the given filepath with the preferred options. Accepts CSV, XLS, XLSX, HTML, and XML.
def load(path, directory=None):
    if directory is not None:
        path = os.path.join(directory, path)
    _, ext = os.path.splitext(path)
    if ext.lower() == '.csv':
        try:
            df = pd.read_csv(path, index_col=None, dtype=object)
            return df
        except:
            df = pd.read_csv(path, index_col=None, dtype=object, encoding="ISO-8859-1")
            return df
    elif ext.lower() == '.xls' or ext.lower() == '.xlsx':
        try:
            df = pd.read_excel(path, index_col=None, dtype=object)
            return df
        except:
            raise Exception
    else:
        ## In case there isn't a filetype specified
        try:
            df = pd.read_csv(path, index_col=None, dtype=object)
            return df
        except:
            df = pd.read_csv(path, index_col=None, dtype=object, encoding="ISO-8859-1")
            return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_file(filepath: str, sep: str = \";\"):\n _, extension = filepath.rsplit(\".\", 1)\n if not os.path.exists(filepath):\n raise FileNotFoundError(filepath)\n if extension == \"csv\":\n content = pd.read_csv(filepath, sep=sep)\n elif extension == \"yaml\":\n content = open_yaml(filepath)\n else:\n content = joblib.load(filepath)\n return content", "def load_from_filepath(filepath: str, allow_unknown_file_type=False):\n try:\n return load_from_filepath_or_content(filepath, _allow_content=False)\n except NoCompatibleLoaderFoundError:\n if not allow_unknown_file_type:\n raise\n # Load the raw contents from file and assume that they are to be\n # interpreted as a raw string.\n with open(filepath) as f:\n return f.read().strip()", "def read_file(path,filename,sheet_name = None, delimiter = None):\n os.chdir(path)\n filetype = file.split('.')[1] #splits filename by delimeter '.' to give 'csv' 'xls' or 'xlsx'\n \n if filetype == 'csv':\n if delimiter == None: delimiter = ',' #csv default delimeter should be a comma, but may often be a semi-colon!\n df = pd.read_csv(file,delimiter)\n \n else:\n df = pd.read_excel(file,sheet_name)\n \n return df", "def load(self,filename):\n basename = os.path.basename(filename)\n self.name, ext = os.path.splitext(basename)\n if ext == '.xml':\n self.load_xml(filename)\n elif ext == '.tsv':\n self.load_tsv_fast(filename)\n elif ext == '.tsvs':\n self.load_tsv(filename)\n else:\n print 'Error: only .xml and .tsv files are supported'", "def load_file(path):\n frmt = path.suffix.replace('.', '', 1)\n if frmt not in SUPPORTED_FORMATS:\n raise RuntimeError(\n 'Unknown file format \"{}\" for file {}. '\n 'Supported formats are: {}.'.format(\n frmt, path,\n ', '.join(sorted(SUPPORTED_FORMATS.keys())),\n )\n )\n\n # Load file\n content = load_content(path.read_text(encoding='utf-8'), frmt)\n return content", "def load(file_choice = file):\n\t\tif file_choice!=file:\n\t\t\tfile = file_choice\n\n\t\tpass", "def load_data(self, filepath, sep=\",\"):\n if filepath.split('.')[-1] == 'csv':\n self.data = pd.read_csv(filepath, sep=sep)\n elif filepath.split('.')[-1] == 'json':\n self.data = pd.read_json(filepath)\n else:\n print 'Please select a csv or json file'", "def load_file(self, filepath):\n _, fileext = os.path.splitext(filepath)\n with open(filepath) as fhandler:\n return replace_paths({\n '.yml': lambda: yaml.load(fhandler, Loader=yamlloader.ordereddict.CLoader),\n '.json': lambda: json.load(fhandler, object_pairs_hook=OrderedDict)\n }.get(fileext, lambda: fhandler.read())())", "def from_csv(\n cls,\n filepath: typing.Union[str, pathlib.Path, typing.TextIO],\n ) -> \"ConversionSpec\":\n if not isinstance(filepath, (str, pathlib.Path)):\n return cls._from_csv(filepath)\n fp = pathlib.Path(filepath)\n with fp.open(newline=\"\") as fd:\n return cls._from_csv(fd)", "def load_data(filepath):\n\n file_path_casted = Path(filepath)\n if not file_path_casted.exists():\n raise FileNotFoundError(\"File does not exist.\")\n\n data = pd.read_csv(filepath, delimiter=\",\")\n\n return data", "def loadData(fileName, fileType):\n try:\n if (fileType == 'xlsx' or fileType == 'xls' or fileType == 'excel'):\n temp = pd.read_excel(fileName)\n elif fileType == 'csv':\n temp = pd.read_csv(fileName)\n else:\n return\n except FileNotFoundError:\n return 'FileNotFoundError'\n else:\n return temp", "def _handleLoadFile(self) -> None:\n\n dialog: ChooseFileDialog = self._makeChooseFileDialog()\n result: DialogResult = dialog.show()\n if result == DialogResult.Ok:\n file: str = 
dialog.getSelectedFile()\n self._setWindowTitle(file)\n data: List[List[Any]] = csvReader.readFile(file)\n self.__spreadsheet.setData(data)", "def dispatch_loader(fname, direc, sep=\"\\t\"):\n ext = fname.split(\".\")[-1]\n # print('Loading from: {}/{}'.format(direc, fname))\n if ext in (\"tsv\" or \"txt\"):\n return load_df_from_txt(fname, direc, sep)\n elif ext == \"pkl\":\n return load_df_from_pkl(fname, direc)\n else:\n raise IOError(\"Unexpected file extension {}.\".format(ext))", "def load_file(fname, fpath='./', delimiter=','):\n\n dest = fpath + fname\n print(f\"Loading file {dest} ...\")\n df_file = pd.read_csv(dest, delimiter=delimiter)\n\n return df_file", "def loadFile(self, filename):\n #TODO: do a contents based detection\n if filename[-4:].lower() == '.txt':\n self.loadTIText(open(filename, \"rb\"))\n elif filename[-4:].lower() in ('.a43', '.hex'):\n self.loadIHex(open(filename, \"rb\"))\n else:\n self.loadELF(open(filename, \"rb\"))", "def loadFile(self, filename):\n #TODO: do a contents based detection\n if filename[-4:].lower() == '.txt':\n self.loadTIText(open(filename, \"r\"))\n elif filename[-4:].lower() in ('.a43', '.hex'):\n self.loadIHex(open(filename, \"r\"))\n else:\n self.loadELF(open(filename, \"rb\"))", "def load_csv(filename, dialect='excel', encoding='utf-8'):\n return Context.fromfile(filename, 'csv', encoding, dialect=dialect)", "def load(self, filepath):\n _ = filepath\n return self", "def load_csv(*, path, filename, sep=\"\\t\", verbose=True):\n \n os.chdir(path)\n if len(glob.glob(filename))==1: \n df = pd.read_csv(filename, sep=sep, low_memory=False)\n \n # display example,\n if verbose==True:\n display(df.head(3))\n print(df.shape)\n else:\n pass\n \n # return,\n return df\n \n else:\n if verbose==True:\n print(f\"\"\"ERROR :csv file {filename}, was not found in: \\n {path}\"\"\")\n else:\n pass", "def load_file(*args, **kwargs): # real signature unknown\n pass", "def _read_file(self):\n # Convert str to pathlib.Path() object.\n if isinstance(self.file_path, str):\n self.file_path = Path(self.file_path)\n\n # Check if file_path exists:\n if not self.file_path.exists():\n raise FileNotFoundError(f'File not found: {self.file_path}')\n\n # Open and read the \"xls\" file\n with open(self.file_path, encoding='windows-1252') as f:\n self.html_string = f.read()", "def load_csvFile(file_location, file_name,sep,encoding):\n try:\n fullpath=file_location+file_name\n df = pd.read_csv(fullpath, encoding=encoding,sep=sep)\n return df\n except IOError:\n print('Error loading the file: ' , file_name)\n sys.exit(1)", "def load_data(path):\r\n\r\n _, ftype = os.path.splitext(path) #get fname and extension\r\n\r\n if os.path.isfile(path):\r\n with open(path) as f:\r\n\r\n if ftype == \".json\" or ftype == \".geojson\": #handle json\r\n data = json.load(f)\r\n # print(data)\r\n return data\r\n\r\n elif ftype == \".csv\": #handle csv with csv reader\r\n with open(path, newline ='') as csvfile:\r\n data = csv.DictReader(csvfile)\r\n return list(data)\r\n\r\n else:\r\n print(\"neither json or csv\")\r\n return None", "def loadCSV(input_file):", "def from_file(cls, filename, copy_input=False, **read_kwargs):\n _, ext = os.path.splitext(filename)\n if ext == '.csv':\n input_df = pd.read_csv(filename, **read_kwargs)\n elif ext in ('.xls', '.xlsx'):\n input_df = pd.read_excel(filename, **read_kwargs)\n else:\n raise TypeError(\n 'from_file reads only .csv, .xls, or .xlsx filetypes'\n )\n return cls(input_df, copy_input)", "def load(self, filename):\n raise NotImplementedError", 
"def load(self, filename):\n pass", "def _load(self, file_path, **kwargs):\n raise NotImplementedError()", "def load_csv(filename: str, solr_url: typing.Optional[str]):\n\n solr_client = Solr(solr_url, always_commit=True) if solr_url else Solr(\"\")\n\n csv_data = { row[\"Item ARK\"]: row for row in csv.DictReader(open(filename)) }\n\n config = {\n \"collection_names\": {\n row[\"Item ARK\"]: row[\"Title\"] for row in csv_data.values() if row[\"Object Type\"] == \"Collection\"\n },\n \"controlled_fields\": load_field_config(\"./fields\"),\n \"child_works\": collate_child_works(csv_data),\n }\n\n controlled_fields = load_field_config(\"./fields\")\n\n mapped_records = []\n for row in rich.progress.track(csv_data.values(), description=f\"Importing {filename}...\"):\n if row[\"Object Type\"] not in (\"ChildWork\", \"Page\"):\n mapped_records.append(map_record(row, solr_client, config=config))\n\n if solr_url:\n solr_client.add(mapped_records)\n else:\n print(json.dumps(mapped_records))", "def load(cls, filename):\n \n raise NotImplementedError(\"not implemented!\")" ]
[ "0.6457961", "0.6086716", "0.6062022", "0.6057617", "0.5977015", "0.5924193", "0.58977485", "0.585386", "0.5848094", "0.5767723", "0.57574713", "0.57529175", "0.5707004", "0.5656995", "0.5639052", "0.56309414", "0.5627251", "0.5604909", "0.5593014", "0.55896103", "0.5577483", "0.55611473", "0.5526656", "0.5510125", "0.5455059", "0.5452243", "0.54197896", "0.54190147", "0.54061097", "0.5377688" ]
0.65248215
0
Converts all xls files at the given path to CSV files and outputs them into a directory named after the file. If the xls file has multiple sheets, each sheet is also output to the new directory.
def csv_from_excel(path=os.getcwd()):
    path = path + '/*.xls*'
    files = glob.glob(path)
    for i in files:
        file = os.path.basename(i)
        filename = os.path.splitext(file)[0]
        xls_file = pd.ExcelFile(i, index_col=None, dtype=object)
        if len(xls_file.sheet_names) > 1:
            try:
                os.mkdir(filename)
            except OSError:
                print('Could not create directory to output to.')
            for x in xls_file.sheet_names:
                file = pd.read_excel(xls_file, x, index_col=None, dtype=object)
                file.to_csv(filename + '/' + x + '.csv', quoting=1, index=False)
        else:
            file = xls_file.parse()
            file.to_csv(filename + '.csv', quoting=1, index=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_excel_files(self):\n for x in self.files:\n if x[-4:] not in [\".xls\", \"xlsx\"]:\n continue\n else:\n files = pd.read_excel(x, sheet_name=None)\n for k, v in files.items():\n #get name with the extension stripped\n name = k.split(\".\")[0]\n out_path = x.split(\".\")[0]\n try:\n os.mkdir(out_path)\n except:\n print(\"directory exists\")\n v.to_csv(f\"{out_path}/{name}.csv\", index=False)\n os.remove(x)\n self.files = [os.path.join(dp, f) for dp, dn, fn in os.walk(self.path) for f in fn]\n self.csv_files = [x for x in self.files if x[-3:] == \"csv\"]", "def excelToCsv(folder):\n for excelFile in os.listdir(folder):\n # Skip non-xlsx files, load the workbook object.\n if not excelFile.endswith('xlsx'):\n continue\n wb = openpyxl.load_workbook(excelFile)\n\n for sheetName in wb.get_sheet_names():\n # Loop through every sheet in the workbook.\n sheet = wb.get_sheet_by_name(sheetName)\n\n # Create the CSV filename from the Excel filename and sheet title.\n csvFilename = excelFile.split('.')[0]+'_'+sheet.title+'.csv'\n csvFileObj = open(csvFilename, 'w', newline='')\n\n # Create the csv.writer object for this CSV file.\n csvWriter = csv.writer(csvFileObj)\n\n # Loop through every row in the sheet.\n for rowObj in sheet.rows:\n rowData = [] # append each cell to this list\n # Loop through each cell in the row.\n for cellObj in rowObj:\n # Append each cell's data to rowData.\n rowData.append(cellObj.value)\n # Write the rowData list to the CSV file.\n csvWriter.writerow(rowData)\n\n csvFileObj.close()", "def convert_to_csv(path):\n directory = os.path.dirname(path)\n file_name_w_ext = os.path.basename(path)\n file_name = os.path.splitext(file_name_w_ext)[0]\n csv_file_path = os.path.join(directory, file_name + \".csv\")\n \n data_xls = pd.read_excel(path, 'Data-Residential Composition')\n data_xls.to_csv(csv_file_path, index=False)", "def excel2csv(excel_path: str) -> str:\n csv_path = excel_path.replace('xlsx', 'csv')\n\n # Create pandas Data Frame from an excel file and save it into csv.\n df_excel = pd.read_excel(excel_path)\n df_excel.to_csv(csv_path, index=False)\n\n os.remove(excel_path)\n\n excel_filename = excel_path.split('/')[-1]\n csv_filename = csv_path.split('/')[-1]\n\n print(f\"Extracting csv from xlsx... {excel_filename} Saving... {csv_filename}'\")\n print(f\"Deleting... 
{excel_filename}\")\n\n return csv_path", "def to_csv_files(self, path):\n self._to_dict_tree().to_csv_files(path)", "def make_csvs():\n for oneFile in glob.glob('excel/*.xlsx'):\n pd = pandas.read_excel(oneFile)\n outName = os.path.splitext(os.path.basename(oneFile))[0]\n pd.to_csv('lists/'+outName+'.csv',index=False)", "def to_csv(self, path):\n if os.path.isdir(path):\n shutil.rmtree(os.path.join(path))\n os.makedirs(path)\n\n for name, df in self.input_data.items():\n name += \".csv\"\n filename = os.path.join(path, name)\n df.to_csv(filename)\n logging.info(\"Scenario saved as csv-collection to %s\", path)", "def county_file_merger(folder_path):\n\n print(\"\\n*******************--- Starting File Merger for .csv files ---*******************\")\n with open(\"result.csv\",\"wb\") as outfile:\n for filename in os.listdir(folder_path):\n with open(filename,\"rb\") as infile:\n for line in infile:\n outfile.write(line)\n infile.close()\n outfile.close()\n print(\"\\nResult saved to -----> result.csv \")\n print(\"\\n*******************--- Finished File Merger for .csv files ---*******************\")", "def xlsx_to_csv(xlsx_file):\n csv_file = xlsx_file[:-5] + '.csv'\n with xlrd.open_workbook(xlsx_file) as wb:\n sh = wb.sheet_by_index(0)\n with open(csv_file, 'w') as r_csv_new:\n writer = csv.writer(r_csv_new)\n for r in range(sh.nrows):\n writer.writerow(sh.row_values(r))\n logging.info('Converted pretty xlsx to plain csv')", "def batch_convert(inputfiles, outputdir=None, fnfmt=\"{fnroot}_{sheetname}.csv\",\n sep_out=None, dialect=\"excel\", usecsvmodule=False,\n sheets=None,\n read_stdin=False, write_stdout=False,\n overwrite=True, discart=r'~$'):\n\n if outputdir:\n outputdir = os.path.expanduser(outputdir)\n if not os.path.exists(outputdir):\n print(\"Creating output directory:\", outputdir)\n os.mkdir(outputdir)\n elif not os.path.isdir(outputdir):\n print(\"\\n\\nERROR: outputdir %s exists but is not a directory, aborting!\" % (outputdir,))\n return\n fnfmt = os.path.join(outputdir, fnfmt)\n\n print(\"Converting %s files, saving to outputdir '%s'\" % (len(inputfiles), outputdir if outputdir else \".\"))\n for inputfn in inputfiles:\n if inputfn.startswith('~') or inputfn.startswith('$') or inputfn.startswith('.'):\n # Dont want these:\n print(\"Ignoring file:\", inputfn)\n continue\n\n print(\"Converting xlsx file:\", inputfn)\n print(\" - outputfn:\", fnfmt)\n nfiles = convert_xlsx_to_csv(\n inputfn, outputfmt=fnfmt,\n sep_out=sep_out, dialect=dialect, usecsvmodule=usecsvmodule,\n sheets=sheets, overwrite=overwrite\n )\n print(\" - %04s files/sheets saved.\" % (nfiles,))", "def convert_csv(path_csv):\n with open(path_csv, 'rb') as f:\n reader = csv.reader(f, delimiter=\",\")\n workbook = xlwt.Workbook()\n sheet = workbook.add_sheet(u\"Лист 1\")\n for r, row in enumerate(reader):\n for c, col in enumerate(row):\n sheet.write(r, c, col.decode(\"utf-8\"))\n file_name, file_ext = os.path.splitext(os.path.basename(path_csv))\n if not os.path.isdir(\"out\"):\n os.mkdir(\"out\")\n workbook.save(\"out\" + str(os.path.sep) + file_name + '.xls')\n return \"out\" + str(os.path.sep) + file_name + '.xls'", "def split_data_into_exchanges(source_path, destination_path):\n for subdir, dirs, files in os.walk(source_path):\n for file in files:\n source_full_file = os.path.join(subdir, file)\n print(source_full_file)\n df = pd.read_csv(source_full_file)\n for group_name, df in df.groupby(['Ticker', 'Exchange']):\n file_name = destination_path / str(df['Date'].iloc[0]) / convertTuple(group_name)\n 
utils.make_dir(file_name)\n with open(file_name, \"w+\") as f:\n df.to_csv(f, index=False)", "def output_into_file(self, path: str):\n # Creating path if not exist\n Path(path).mkdir(parents=True, exist_ok=True)\n # Writing every day as a csv file\n for day in self:\n with open(f\"{path}/{day.name}.csv\", \"w\") as file:\n writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n # First line / Title\n writer.writerow([\" \", day.name])\n for shift in day:\n employees = \", \".join([e.name for e in shift.employees])\n writer.writerow([f\"{shift.start}-{shift.end}\", employees])", "def csv(self, destination_path):\n # todo - test for single and duplicate base cases\n to_csv(self._axl_data, destination_path)", "def collect_csv(source_dir, dest_dir):\n source_dir = Path(source_dir)\n dest_dir = Path(dest_dir)\n for csvfile in source_dir.rglob(\"*.csv\"):\n species = normalized_species(csvfile)\n species_dir = dest_dir / species\n species_dir.mkdir(exist_ok=True, parents=True)\n date_time = normalized_datetime(csvfile)\n print(f\"Renaming {csvfile} to {species_dir / (date_time + '.csv')}\")\n csvfile.rename(species_dir / (date_time + \".csv\"))", "def convert_xlsx_to_csv(inputfn, outputfmt=\"{fnroot}_{sheetname}.csv\",\n sheets=None, overwrite=None,\n usecsvmodule=False, sep_out=None, dialect=\"excel\"):\n # save_as(file_name=inputfn, dest_file_name=outputfn, dest_file_type='csv')\n wb = load_workbook(inputfn, read_only=True)\n wb.get_sheet_names()\n fnroot, ext = os.path.splitext(inputfn)\n if not usecsvmodule and sep_out is None:\n sep_out = '\\t'\n nfiles = 0\n nbytes_tot = 0\n if sheets:\n # print(\"sheets:\", sheets)\n sheet_idxs = set(sheets)\n for sheet in sheets:\n try:\n sheet_idxs.add(int(sheet))\n except ValueError:\n pass\n sheets = sheet_idxs\n # print(\"sheet_idxs:\", sheet_idxs)\n for sheetidx, ws in enumerate(wb.worksheets, 1):\n if sheets and (sheetidx not in sheet_idxs and ws.title not in sheet_idxs):\n print(\"EXCLUDING sheet %02s: %s\" % (sheetidx, ws.title))\n continue\n print(\"Using worksheet '%s' in workbook '%s'\" % (ws.title, inputfn))\n outputfn = outputfmt.format(fnroot=fnroot, sheetname=ws.title)\n # TODO: Consider using csv module for proper quoting of cell values.\n if usecsvmodule:\n # If using csv module, files should be opened with newline='' to prevent inserting extra '\\r'\n with open(outputfn, 'w', newline='') as fout:\n csvwriter = csv.writer(fout, dialect=dialect)\n rows = ((cell.value if cell.value is not None else \"\" for cell in row) for row in ws)\n # No, simply passing ws doesn't work because we need cell.value not cell.\n csvwriter.writerows(rows) # Returns None\n print(\" - %04s rows written to file %s.\" % (len(tuple(ws.rows)), outputfn))\n else:\n with open(outputfn, 'w') as fout:\n nbytes = fout.write(\"\\n\".join(\n sep_out.join(str(cell.value if cell.value is not None else \"\") for cell in row) for row in ws) + '\\n')\n nbytes_tot += nbytes\n print(\" - %04s bytes written to file %s.\" % (nbytes, outputfn))\n nfiles += 1\n return nfiles", "def merge_csv_daily(output_filename, path):\n\n # import csv files from folder\n allFiles = glob.glob(path + \"*.csv\")\n\n with open(output_filename, 'wb+') as outfile:\n for i, fname in enumerate(allFiles):\n with open(fname, 'rb') as infile:\n if i != 0:\n infile.readline() # Throw away header on all but first file\n # Block copy rest of file from input to output without parsing\n shutil.copyfileobj(infile, outfile)\n # print(fname + \" has been imported.\")\n\n # adding 
MissingObs column back:\n df = pd.read_csv(output_filename, header=0, sep=',', index_col=[0,1], parse_dates=False)\n df.insert(loc=3, column='MissingObs', value=np.zeros((df.shape[0], )))\n df.to_csv(output_filename, sep=',')\n\n return output_filename", "def backup_csv():\n for file_name in os.listdir():\n if \".csv\" in file_name:\n print(\"There shouldn't be any .csv files in your directory. We found .csv files in your directory.\")\n directory = os.getcwd()\n try:\n os.mkdir(directory + \"/backup/\")\n except:\n print(\"Backup folder exists.\")\n timestamp = datetime.now()\n shutil.move(file_name, directory + \"/backup/\" + str(timestamp) + \"-\" + file_name)", "def to_csv(self, dir_path, **kwargs):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n for name, table in self.items():\n path = os.path.join(dir_path, '%s.csv' % name)\n\n table.to_csv(path, **kwargs)", "def make_csv(idir, dates):\n for path, dirs, files in os.walk(idir):\n for date in dates:\n # first loop over output dir\n if not path.endswith(str(date)):\n continue\n arr = path.split('/')\n oname = '%s-%s.csv' % (arr[-2], arr[-1])\n print(\"write %s\" % oname)\n with open(oname, 'w') as ostream:\n headers = None\n for ifile in files:\n if 'part-' not in ifile:\n continue\n iname = os.path.join(path, ifile)\n with open(iname) as istream:\n first_line = istream.readline()\n if not headers:\n headers = first_line\n ostream.write(headers)\n while True:\n line = istream.readline().replace('\"', '')\n if not line:\n break\n ostream.write(line)", "def convert_all_in_bmp(self, path, new_path):\n DbWorker.mkdir(new_path)\n for i in os.listdir(path):\n self.convert_and_save_image(path+'/'+i, new_path)", "def get_csv_in_path(self, path):\n files = os.listdir((path))\n return files", "def save(self):\n\n date = datetime.utcnow().strftime(\"%Y-%m-%d\")\n directory = '%s/xls/%s/' % (PROJECT_DIR, date)\n _file = directory + '/' + self.xls.name\n if not os.path.exists(directory):\n os.makedirs(directory)\n with open(_file, 'wb+') as destination:\n [destination.write(chunk) for chunk in self.xls.chunks()]\n self.batch(_file)", "def compress_csv():\n if not os.path.exists(PATH_CSV):\n os.makedirs(PATH_CSV)\n os.chdir(PATH_CSV)\n try:\n zip_csv = ZipFile(SITE_NAME + '_' + DATE + '_csv.zip', 'a')\n for file in glob.glob(\"*\" + DATE + \"*\" + \"csv\"):\n zip_csv.write(file)\n os.remove(file)\n log.info(f\"Compressing {str(OBSERVATION)} item(s)\")\n except Exception as e:\n log.error('Error when compressing csv')\n log.info(type(e).__name__ + str(e))\n os.chdir(PROJECT_PATH)", "def make_up(self, base_path='./data/'):\n for csv_file_path in [f\"{base_path}{_}\" for _ in os.listdir(base_path)]:\n self.append_file(csv_file_path)", "def to_csv(self, path):\n for table in ['datasets', 'dataruns', 'hyperpartitions', 'classifiers']:\n df = pd.read_sql('SELECT * FROM %s' % table, self.session.bind)\n df.to_csv(os.path.join(path, '%s.csv' % table), index=False)", "def to_csv(self, path):\n results = self.all()\n if self.stop_check is not None and self.stop_check():\n return\n results.to_csv(path)", "def collect_data(folder):\n folder = pathlib.Path(folder)\n cases = []\n for case_folder in folder.iterdir():\n print(f'start collecting data for location {case_folder.name}')\n for tr_folder in case_folder.iterdir():\n case = calculate_values(tr_folder)\n cases.append(case)\n \n df = pd.DataFrame(cases)\n print(folder.parent.joinpath(f'{folder.stem}.csv'))\n df.to_csv(folder.parent.joinpath(f'{folder.stem}.csv'), index=False)", "def output(df, 
path=\"./outputs\", file=\"output\"):\n # if output directory does not already exist, make it\n if not os.path.isdir(path):\n os.makedirs(path)\n\n # check that the user has included file extension, if so, remove it\n if '.csv' in file:\n file = file.replace('.csv', '')\n\n # merge path and file\n full_path = os.path.join(path, f\"{file}.csv\")\n \n while True:\n try:\n df.to_csv(full_path, sep='|', index=False)\n # update user\n print(f\"{file} data issues saved to \"\n f\"'{full_path}'.\")\n # if data saved successfully we break the while loop\n break\n except PermissionError:\n # user or another has file open, request to close or rename\n rename = input(f\"'{full_path}' is open, please close and press <Enter>\"\n \" or type a new filename (and press <Enter>).\")\n if rename.strip() == '':\n pass\n elif '.csv' in rename.strip():\n full_path = os.path.join(path, rename) # merge path and file\n else:\n full_path = os.path.join(path, rename+'.csv')", "def add_excel(path=None, names=None):\r\n walker = WalkUserData()\r\n fnamelist = walker.dir_process(1, path, style=\"fnamelist\")" ]
[ "0.69403607", "0.6792918", "0.6768217", "0.61681294", "0.5829117", "0.57755077", "0.57054704", "0.5697257", "0.56801313", "0.5591886", "0.55775934", "0.5491799", "0.5417065", "0.53639287", "0.53344417", "0.5266534", "0.5203592", "0.51408166", "0.5134812", "0.50818545", "0.5060229", "0.50312746", "0.5023172", "0.50018716", "0.49529588", "0.49035075", "0.48926243", "0.4878778", "0.48717308", "0.48620835" ]
0.74483657
0
Distributes the values in the ranks column into columns created from the unique values in the programs column.
def fix_ranks(df, ranks='Current Ranks', programs='Programs'):
    # Create columns based on unique program values
    if programs in df:
        unique_programs = set(df[programs].unique())
        if np.nan in unique_programs:
            unique_programs.remove(np.nan)
        for value in unique_programs.copy():
            val_list = value.split(', ')
            if len(val_list) > 1:
                for comma_split_value in value.split(', '):
                    if comma_split_value not in unique_programs:
                        unique_programs.add(comma_split_value)
                unique_programs.remove(value)
        for x in unique_programs:
            if x not in df.columns.values:
                df[x] = ""
    # Assign Ranks to respective columns
    for index, x in df[ranks].iteritems():
        new_col = str(df.iloc[index][programs])
        x = str(x)
        # If there are commas
        if ',' in new_col or x:
            new_col = new_col.split(', ')
            x = x.split(', ')
            for rank, col in zip(x, new_col):
                df.at[index, col] = rank
        # No commas
        else:
            if df.at[index, col].notnull():
                print('value already exists: ' + df.loc[index, col])
            else:
                df.set_value(index, new_col, x)
    # Get rid of 'nan' in cells
    df[df == 'nan'] = np.nan
    return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalized_rankings(df, columns=['Lust', 'Envy', 'Greed', 'Sloth', 'Wrath',\n 'Pride', 'Gluttony']):\n df[columns] = (df[columns] - 4) / 2 # hard coding n=7 case for now", "def ranksByProgram(self):\n\n # Prompts user and gets a set of values\n ranks, programs = Model.RanksByProgramsDialogBox.getResults(self.getCurrentPanda(), self)\n\n # Passes values to PandaModel to be validated and acted upon\n self.getCurrentPanda().ranksByPrograms(ranks, programs)", "def table_rank_dict(rank_dict, n_top_genes=30, order=1, output_values=False):\n data = {}\n for g, r in rank_dict.items():\n d = [k for k in r.keys()][::order]\n data[g] = d[:n_top_genes]\n if output_values:\n dd = [v for v in r.values()][::order]\n data[g + \"_values\"] = dd[:n_top_genes]\n return pd.DataFrame(data=data)", "def impute(data, ranks):\n rank_dict = {}\n for i, split in enumerate(ranks):\n for j, runner in enumerate(split):\n if runner['split_mins'] == '--':\n adj_rank = find_rank(runner['runner_idx'], ranks[i+1])\n runner['rank'] = adj_rank\n split[i][j] = runner\n\n srt_splits = sorted(split, key=lambda t: t['rank'])\n for r, runner in enumerate(srt_splits):\n runner['rank'] = r\n ridx = runner['runner_idx']\n label = runner['split_label']\n key = '{0}_{1}'.format(ridx, label)\n rank_dict[key] = runner\n\n for runner_idx, runner in enumerate(data):\n runner['name'] = name_format(runner['name'])\n year = int(runner['year'])\n splits = runner['splits']\n for n, split in enumerate(splits):\n key = '{0}_{1}'.format(runner_idx, split['split_label'])\n split['split_place'] = rank_dict[key]['rank']\n split['year'] = year\n\n return data", "def addRankToLine(self,cols,rank):\n line = \"\"\n ann = \";RankScore=\" + str(self.family_id) + \":\" + str(rank)\n for c in cols:\n line = line + c\n if cols.index(c) == 7: # 0-based index\n line = line + ann\n if cols.index(c) < len(cols):\n line = line + \"\\t\"\n print(line)", "def _sub_ranker_RN(self,numbers_frequency):\n rank_dictionary={'42':3,'32':4,'33':7,'23':8,'24':9,'15':10}\n\n #in this subspace where sequences(of 5 cards) and repetead suits (of 5 cards) are not posible\n #there is a relation between , max frequency and number of different numbers (freq>0) with the rank\n\n case=str(max(numbers_frequency))+str(len(numbers_frequency.loc[numbers_frequency>0]))\n\n return rank_dictionary[case]", "def _compute_ranks(df, lower_better=True):\n # return df.rank(axis=1, numeric_only=True, ascending=lower_better)\n return df.rank(axis=1, numeric_only=True, ascending=lower_better, method='min')", "def _apply_rank(U, S, VT, r, verbose=False):\n if r is None:\n r = len(S)\n S_r = S[:r]\n U_r = U[:, :r]\n VT_r = VT[:r]\n if verbose:\n print(\"Rank:\", r, \"SVD shape:\", U_r.shape, S_r.shape, VT_r.shape)\n return U_r, S_r, VT_r", "def _graph_ranks(avranks, names, p_values, cd=None, cdmethod=None, lowv=None, highv=None, highlight=None,\n width=6, textspace=1, reverse=False, filename=None, labels=False, **kwargs):\n width = float(width)\n textspace = float(textspace)\n \n def lloc(_list, n):\n \"\"\"\n List location in list of list structure.\n Enable the use of negative locations:\n -1 is the last element, -2 second last...\n \"\"\"\n if n < 0:\n return len(_list[0]) + n\n return n\n \n def nth(_list, n):\n \"\"\"\n Returns only nth elemnt in a list.\n \"\"\"\n n = lloc(_list, n)\n return [a[n] for a in _list]\n\n def mxrange(lr):\n \"\"\"\n Multiple xranges. 
Can be used to traverse matrices.\n This function is very slow due to unknown number of\n parameters.\n >>> mxrange([3,5])\n [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]\n >>> mxrange([[3,5,1],[9,0,-3]])\n [(3, 9), (3, 6), (3, 3), (4, 9), (4, 6), (4, 3)]\n \"\"\"\n if len(lr):\n yield ()\n else:\n # it can work with single numbers\n index = lr[0]\n if isinstance(index, int):\n index = [index]\n for a in range(*index):\n for b in mxrange(lr[1:]):\n yield tuple([a] + list(b))\n\n sums = avranks\n\n nnames = names\n ssums = sums\n\n if lowv is None:\n lowv = min(1, int(math.floor(min(ssums))))\n if highv is None:\n highv = max(len(avranks), int(math.ceil(max(ssums))))\n\n cline = 0.4\n\n k = len(sums)\n\n linesblank = 0\n scalewidth = width - 2 * textspace\n\n def rankpos(rank):\n if not reverse:\n a = rank - lowv\n else:\n a = highv - rank\n return textspace + scalewidth / (highv - lowv) * a\n\n distanceh = 0.25\n\n cline += distanceh\n\n # calculate height needed height of an image\n minnotsignificant = max(2 * 0.2, linesblank)\n height = cline + ((k + 1) / 2) * 0.2 + minnotsignificant\n\n fig = plt.figure(figsize=(width, height*1.05))\n fig.set_facecolor('white')\n ax = fig.add_axes([0, 0, 1, 1]) # reverse y axis\n ax.set_axis_off()\n\n hf = 1. / height # height factor\n wf = 1. / width\n\n def hfl(_list):\n return [a * hf for a in _list]\n\n def wfl(_list):\n return [a * wf for a in _list]\n\n # Upper left corner is (0,0).\n ax.plot([0, 1], [0, 1], c=\"w\")\n ax.set_xlim(0, 1)\n ax.set_ylim(1, 0)\n\n def line(l, color='k', **kwargs):\n \"\"\"\n Input is a list of pairs of points.\n \"\"\"\n ax.plot(wfl(nth(l, 0)), hfl(nth(l, 1)), color=color, **kwargs)\n\n def text(x, y, s, *args, **kwargs):\n ax.text(wf * x, hf * y, s, *args, **kwargs)\n\n line([(textspace, cline), (width - textspace, cline)], linewidth=2)\n\n bigtick = 0.3\n smalltick = 0.15\n linewidth = 2.0\n linewidth_sign = 4.0\n\n tick = None\n for a in list(np.arange(lowv, highv, 0.5)) + [highv]:\n tick = smalltick\n if a == int(a):\n tick = bigtick\n line([(rankpos(a), cline - tick / 2),\n (rankpos(a), cline)],\n linewidth=2)\n\n for a in range(lowv, highv + 1):\n text(rankpos(a), cline - tick / 2 - 0.05, str(a),\n ha=\"center\", va=\"bottom\", size=16)\n\n k = len(ssums)\n\n def filter_names(name):\n return name\n\n space_between_names = 0.24\n\n for i in range(math.ceil(k / 2)):\n chei = cline + minnotsignificant + i * space_between_names\n if nnames[i] == highlight:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace - 0.1, chei)],\n linewidth=linewidth, color='red')\n else:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace - 0.1, chei)],\n linewidth=linewidth)\n if labels:\n text(textspace + 0.3, chei - 0.075,\n format(ssums[i], '.4f'), ha=\"right\", va=\"center\", size=10)\n if nnames[i] == highlight:\n text(textspace - 0.2, chei,\n filter_names(nnames[i]), ha=\"right\", va=\"center\", size=18, color='red')\n else:\n text(textspace - 0.2, chei,\n filter_names(nnames[i]), ha=\"right\", va=\"center\", size=18)\n\n for i in range(math.ceil(k / 2), k):\n chei = cline + minnotsignificant + (k - i - 1) * space_between_names\n if nnames[i] == highlight:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace + scalewidth + 0.1, chei)],\n linewidth=linewidth, color='red')\n else:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace + scalewidth + 0.1, chei)],\n linewidth=linewidth)\n if labels:\n text(textspace + scalewidth - 
0.3, chei - 0.075,\n format(ssums[i], '.4f'), ha=\"left\", va=\"center\", size=10)\n if nnames[i] == highlight:\n text(textspace + scalewidth + 0.2, chei, filter_names(nnames[i]),\n ha=\"left\", va=\"center\", size=18, color='red')\n else:\n text(textspace + scalewidth + 0.2, chei, filter_names(nnames[i]),\n ha=\"left\", va=\"center\", size=18)\n start = cline + 0.2\n side = -0.02\n height = 0.1\n\n # draw no significant lines\n # get the cliques\n cliques = _form_cliques(p_values, nnames)\n achieved_half = False\n print(nnames)\n for clq in cliques:\n if len(clq) == 1:\n continue\n print(clq)\n min_idx = np.array(clq).min()\n max_idx = np.array(clq).max()\n if min_idx >= len(nnames) / 2 and achieved_half == False:\n start = cline + 0.25\n achieved_half = True\n line([(rankpos(ssums[min_idx]) - side, start),\n (rankpos(ssums[max_idx]) + side, start)],\n linewidth=linewidth_sign)\n start += height", "def get_rank(ngrams_set, path):\r\n pmi_sorted = sorted(set([float(tup[3]) for tup in ngrams_set]), reverse=True)\r\n pmi_ranking = {}\r\n for i in range(len(pmi_sorted)):\r\n pmi_ranking[pmi_sorted[i]] = i + 1\r\n\r\n tscore_sorted = sorted(set([float(tup[4]) for tup in ngrams_set]), reverse=True)\r\n tscore_ranking = {}\r\n for i in range(len(tscore_sorted)):\r\n tscore_ranking[tscore_sorted[i]] = i + 1\r\n\r\n logDice_sorted = sorted(set([float(tup[2]) for tup in ngrams_set]), reverse=True)\r\n logDice_ranking = {}\r\n for i in range(len(logDice_sorted)):\r\n logDice_ranking[logDice_sorted[i]] = i + 1\r\n\r\n with open(path[:-4] + '_ranked.csv', 'a', encoding='utf-8') as f:\r\n f.write('\\t'.join(['collocation_tags', 'raw_frequency', 'log_Dice', 'PMI', 'tscore',\r\n 'pmi_rank', 'tsc_rank', 'logD_rank', 'summary t-sc + pmi', 'summary logD + pmi']) + '\\n')\r\n for collocation_scored in ngrams_set:\r\n collocation_tags, raw_frequency, log_Dice, PMI, tscore = collocation_scored\r\n tsc_rank, pmi_rank, logd_rank, summarytscpmi, summarylogdpmi = ranked(pmi_ranking, tscore_ranking,\r\n logDice_ranking, tscore, PMI, log_Dice)\r\n f.write('\\t'.join([collocation_tags, raw_frequency,\r\n log_Dice, PMI, tscore, str(pmi_rank), str(tsc_rank), str(logd_rank),\r\n str(summarytscpmi), str(summarylogdpmi)]) + '\\n')", "def rank_teams_of_curr_run(curr_score, curr_ranking):\n for place in curr_ranking:\n curr_place = get_key_with_max_value(curr_score)\n curr_ranking[place] = curr_ranking[place].__add__([curr_place])\n curr_score.pop(curr_place)\n return curr_ranking", "def independentCluster(table, p_t):\n \n apps = list(table.columns.values)\n n0 = len(apps)\n nn = 0\n count = 0\n while(nn != n0):\n \n n0 = len(apps)\n apps_compliment = []\n \n while len(apps) != 0:\n a1 = [apps[0]] if isinstance(apps[0], str) else apps[0]\n max_p, max_a, max_ai = 0, [], None\n apps = apps[1:]\n\n for i, a2 in enumerate(apps):\n if isinstance(a2, str):\n a2 = [a2] \n table1 = table[a1+a2]\n ret = stats.chi2_contingency(observed=table1)\n p = ret[1]\n if max_p < p:\n max_p, max_a, max_ai = p, a2, i\n \n tmp = a1\n if max_p > p_t:\n tmp += max_a\n if max_ai != None:\n apps.remove(apps[max_ai])\n\n apps_compliment.append(tmp)\n count += 1\n #if count == 5:\n #eee\n apps = apps_compliment \n nn = len(apps_compliment)\n\n app_imrs = []\n for app in apps:\n inner_max, inner_min, inner_mean, inner_p = 0, 0, 0, 0\n \n for a in app:\n inner_max += table[a].max()\n inner_min += table[a].min()\n inner_mean += table[a].mean()\n inner_imr = float(inner_max-inner_min)/inner_max\n \n if len(app) > 1:\n table1 = table[app]\n ret = 
stats.chi2_contingency(observed=table1)\n inner_p = ret[1]\n else:\n inner_p = 1.0\n app_imrs.append((app, inner_p, inner_imr))\n return app_imrs", "def recommendation_ranking(self):\n iu = self.final_recommendation_score_matrix()\n new_iu = []\n for row in iu:\n li = []\n temp = row\n if self.product != \"dist\":\n temp = -np.sort(-temp)\n for element in row:\n li.append(binary_search_opp(temp,element)+1) \n else:\n temp = np.sort(temp)\n for element in row:\n li.append(np.searchsorted(temp,element)+1)\n new_iu.append(li)\n return np.array(new_iu)", "def place_at_splits(data):\n groups = defaultdict(list)\n for runner_idx, runner in enumerate(data):\n splits = runner['splits']\n for split in splits:\n split['runner_idx'] = runner_idx\n groups[split['split_dist']].append(split)\n\n ranks = []\n srt_keys = sorted(groups, key=groups.get)\n for key in srt_keys:\n group = groups[key]\n srt_group = sorted(group, key=lambda t: t['split_mins'])\n ranked_group = []\n for rank, split in enumerate(srt_group):\n split['rank'] = rank\n ranked_group.append(split)\n ranks.append(ranked_group)\n\n return data, ranks", "def adjusted_ranking(df_ranking):\n\n # Creation of a list containing the Produtor, ordered by the maximum of Pontuacao that the Produtor gets in any\n # of its pairs\n\n # Transform the structure and contents of the Ranking DataFrame into something that can be useful\n df_ranking_usable = df_ranking.groupby([\"produtor\"]).max()\n df_ranking_usable = df_ranking_usable.reset_index()\n\n df_ranking_usable = df_ranking_usable[{\"produtor\", \"pontuacao\"}]\n df_ranking_usable = df_ranking_usable.sort_values(\"pontuacao\", ascending=False)\n\n # Create the list\n lista_prod = df_ranking_usable[\"produtor\"].tolist()\n\n # Create a DataFrame that has the ranking sorted by the name of the Produtor and that\n # for each Produtor, is sorted by the Pontuacao each of its Produtos has\n\n sorted_ranking = df_ranking.sort_values(\n by=[\"produtor\", \"pontuacao\"], ascending=False\n )\n\n # Create the final Adjusted Ranking\n\n ranking_ajustado = []\n\n for produtor in lista_prod:\n auxiliary_dict = {}\n auxiliary_dict[\"produtor\"] = produtor\n auxiliary_dict[\"produtos\"] = (\n sorted_ranking.loc[sorted_ranking[\"produtor\"] == produtor][\n {\"produto\", \"pontuacao\"}\n ]\n ).to_dict(\"records\")\n ranking_ajustado.append(auxiliary_dict)\n\n return ranking_ajustado", "def language_stats_to_dataframe(results, n_runs, n_gens, possible_form_lengths):\n\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. 
collapsing the two different reduplication strategies?)\n\n column_proportion = np.array(results)\n\n if n_language_classes == 4 and column_proportion.shape[2] > n_language_classes:\n column_proportion_compositional_summed = np.zeros((n_runs, n_gens, n_language_classes))\n for r in range(len(column_proportion_compositional_summed)):\n for g in range(len(column_proportion_compositional_summed[0])):\n column_proportion_compositional_summed[r][g] = np.array([column_proportion[r][g][0], column_proportion[r][g][1], column_proportion[r][g][2]+column_proportion[r][g][3], column_proportion[r][g][4]])\n column_proportion = column_proportion_compositional_summed.flatten()\n\n else:\n column_proportion = column_proportion.flatten()\n\n column_runs = []\n for i in range(n_runs):\n for j in range(n_gens):\n for k in range(n_language_classes):\n column_runs.append(i)\n column_runs = np.array(column_runs)\n\n column_generation = []\n for i in range(n_runs):\n for j in range(n_gens):\n for k in range(n_language_classes):\n column_generation.append(j)\n column_generation = np.array(column_generation)\n\n column_type = []\n for i in range(n_runs):\n for j in range(n_gens):\n if len(possible_form_lengths) == 1:\n column_type.append('degenerate')\n column_type.append('holistic')\n column_type.append('compositional')\n column_type.append('other')\n else:\n column_type.append('degenerate')\n column_type.append('holistic')\n column_type.append('holistic_diversify_signal')\n column_type.append('compositional')\n column_type.append('compositional_reduplicate_segments')\n column_type.append('compositional_reduplicate_whole_signal')\n column_type.append('other')\n\n data = {'run': column_runs,\n 'generation': column_generation,\n 'proportion': column_proportion,\n 'class': column_type}\n\n lang_class_prop_over_gen_df = pd.DataFrame(data)\n\n return lang_class_prop_over_gen_df", "def _get_rank_values(self):\n \n info_gains = {}\n \n #caluclate info gain\n for col in self.cat_cols:\n info_gains[col] = self._get_info_gain(col)\n \n return info_gains", "def _rank(measure):\n sort_idx = np.argsort(-measure)\n ranks = np.empty(len(measure), int)\n ranks[sort_idx] = np.arange(1, len(measure)+1)\n return ranks", "def reduce(self, rank=None, comm=None):\n\n if rank is not None:\n gdata = comm.gather(self.lumcounts, root=0)\n\n\n if rank==0:\n gshape = [self.lumcounts.shape[i] for i in range(len(self.lumcounts.shape))]\n gshape[0] = self.njacktot\n\n self.lumcounts = np.zeros(gshape)\n jc = 0\n #iterate over gathered arrays, filling in arrays of rank==0\n #process\n for g in gdata:\n if g is None: continue\n nj = g.shape[0]\n self.lumcounts[jc:jc+nj,:,:,:] = g\n\n jc += nj\n\n area = self.ministry.galaxycatalog.getArea(jackknife=True)\n vol = np.zeros((self.njacktot, self.nzbins))\n dl = (self.xbins[1:] - self.xbins[:-1]).reshape(1,-1,1,1)\n for i in range(self.nzbins):\n vol[:,i] = self.ministry.calculate_volume(area, self.zbins[i], self.zbins[i+1])\n\n if (not self.lightcone) & (self.jtype is not None):\n vol *= (self.njacktot-1) / self.njacktot\n\n self.jlumcounts = self.jackknife(self.lumcounts, reduce_jk=False)\n self.jluminosity_function = self.jlumcounts / vol.reshape(self.njacktot, 1, 1, -1) / dl\n\n self.luminosity_function = np.sum(self.jluminosity_function, axis=0) / self.njacktot\n self.varluminosity_function = np.sum((self.jluminosity_function - self.luminosity_function) ** 2, axis=0) * (self.njacktot - 1) / self.njacktot\n self.y = self.luminosity_function\n self.ye = np.sqrt(self.varluminosity_function)\n 
else:\n if self.jtype is not None:\n area = self.ministry.galaxycatalog.getArea(jackknife=True)\n else:\n area = self.ministry.galaxycatalog.getArea(jackknife=False)\n\n vol = np.zeros((self.njacktot, self.nzbins))\n for i in range(self.nzbins):\n vol[:,i] = self.ministry.calculate_volume(area, self.zbins[i], self.zbins[i+1])\n\n if (not self.lightcone) & (self.jtype is not None):\n vol *= (self.njacktot-1) / self.njacktot\n\n self.jlumcounts = self.jackknife(self.lumcounts, reduce_jk=False)\n self.jluminosity_function = self.jlumcounts / vol.reshape(self.njacktot, 1, 1, -1)\n\n self.luminosity_function = np.sum(self.jluminosity_function, axis=0) / self.njacktot\n self.varluminosity_function = np.sum((self.jluminosity_function - self.luminosity_function) ** 2, axis=0) * (self.njacktot - 1) / self.njacktot\n self.y = self.luminosity_function\n self.ye = np.sqrt(self.varluminosity_function)", "def get_scaling_results(path_pattern, ranks):\n configs, results = [], []\n for r in ranks:\n result_dir = path_pattern % r\n configs.append(load_config(result_dir))\n results.append(load_result(result_dir).assign(ranks=r))\n samples = np.array([get_num_samples(c,r) for (c,r) in zip(configs, ranks)]) \n times = np.array([compute_mean_time(r) for r in results])\n throughputs = samples / times\n ideal = ranks * throughputs[0]/4 # Change to the GPU/node\n eff = throughputs / ideal\n return pd.DataFrame(dict(ranks=ranks, samples=samples,\n times=times, throughputs=throughputs,\n ideal=ideal, eff=eff))", "def gf2_rank(rows):\r\n rank = 0\r\n while rows:\r\n pivot_row = rows.pop()\r\n if pivot_row:\r\n rank += 1\r\n lsb = pivot_row & -pivot_row\r\n for index, row in enumerate(rows):\r\n if row & lsb:\r\n rows[index] = row ^ pivot_row\r\n return rank", "def getSocialSimilarityMatrix(self, column_map):\n\n\t\tdf = self.getSocialNetwork()\n\n\t\tS = lil_matrix((len(self.topusers), len(column_map)))\n\n\t\tfor index, row in df.iterrows():\n\t\t\tif row[\"friend_id\"] in column_map.keys():\n\t\t\t\tS[ self.topusers.index(row[\"top_user_id\"]), column_map[row[\"friend_id\"]] ] = row[\"weight\"]\n\t\t\telse:\n\t\t\t\tpass\n\t\t\t\t# print(\"User %i did not make any checkins\" %row[\"friend_id\"] )\n\n\t\tfor i in range(len(self.topusers)):\n\t\t\tS[i, column_map[self.topusers[i]]] = S[i, column_map[self.topusers[i]]] +1\n\n\t\tS = S.tocsr()\n\n\t\treturn S", "def __init__(self, applicant_ranks: np.ndarray, program_ranks: np.ndarray,\n program_slots: np.ndarray):\n self.applicant_ranks = applicant_ranks\n self.program_ranks = program_ranks\n self.program_slots = program_slots\n self.applicant_matches = np.full([applicant_ranks.shape[0]], np.nan)\n self.program_matches = np.full([program_ranks.shape[0]], np.nan)", "def ratings_to_matrix(ratings_df, user_col, item_col, rating_col, forced_shape=None):\n users_num = ratings_df.user_id.max() + 1\n items_num = ratings_df.item_id.max() + 1\n \n if forced_shape:\n users_num = max(users_num, forced_shape[0])\n items_num = max(items_num, forced_shape[1])\n \n ratings_mat = np.zeros([users_num, items_num])\n for rating in ratings_df.itertuples():\n ratings_mat[rating[user_col], rating[item_col]] = rating[rating_col]\n \n return ratings_mat", "def ranking_metric(df, method, phenoPos, phenoNeg, classes, ascending): \n \n A = phenoPos\n B = phenoNeg\n df2 = df.T \n df2['class'] = classes\n df_mean= df2.groupby('class').mean().T\n df_std = df2.groupby('class').std().T \n #exclude any zero stds.\n df_mean = df_mean[df_std.sum(axis=1) !=0]\n df_std = df_std[df_std.sum(axis=1) !=0]\n 
\n if method == 'signal_to_noise':\n sr = (df_mean[A] - df_mean[B])/(df_std[A] + df_std[B])\n elif method == 't_test':\n sr = (df_mean[A] - df_mean[B])/ np.sqrt(df_std[A]**2/len(df_std)+df_std[B]**2/len(df_std) )\n elif method == 'ratio_of_classes':\n sr = df_mean[A] / df_mean[B]\n elif method == 'diff_of_classes':\n sr = df_mean[A] - df_mean[B]\n elif method == 'log2_ratio_of_classes':\n sr = np.log2(df_mean[A] / df_mean[B])\n else:\n logging.error(\"Please provide correct method name!!!\") \n sys.exit()\n sr.sort_values(ascending=ascending, inplace=True)\n df3 = sr.to_frame().reset_index()\n df3.columns = ['gene_name','rank']\n df3['rank2'] = df3['rank']\n\n return df3", "def melt_cluster_matrix(df, name=\"count\"):\n return df.T.reset_index().melt(id_vars=\"cluster\", var_name=\"FBgn\", value_name=name)", "def combine_prop_and_freq_scores(\n self, network_prop_scores, network_freq_scores, raw_or_rank\n ):\n\n for index, network_score_dict in enumerate([network_prop_scores, network_freq_scores]):\n # Only need to order propensities if converting to probability\n # scores via their rank\n if index == 0:\n prop_or_freq = 'propensity'\n if raw_or_rank == 'rank':\n # Have taken -ve logarithm of propensity scores, so lower\n # scores are more likely\n network_score_dict = OrderedDict(sorted(\n network_score_dict.items(), key=itemgetter(1), reverse=True\n ))\n elif index == 1:\n prop_or_freq = 'frequency'\n # Lower scores are more likely (smaller difference between\n # actual and expected frequency distribution)\n network_score_dict = OrderedDict(sorted(\n network_score_dict.items(), key=itemgetter(1), reverse=True\n ))\n\n network_num = np.array(list(network_score_dict.keys()))\n network_scores = np.array(list(network_score_dict.values()))\n\n if index == 0 and raw_or_rank == 'raw':\n (network_num, network_scores, network_prob\n ) = propensity_to_probability_distribution(\n network_num, network_scores\n )\n elif index == 0 and raw_or_rank == 'rank':\n (network_num, network_scores, network_prob\n ) = frequency_to_probability_distribution(\n network_num, network_scores, prop_or_freq\n )\n elif index == 1:\n if self.frequency_dicts != {}:\n (network_num, network_scores, network_prob\n ) = frequency_to_probability_distribution(\n network_num, network_scores, 'propensity'\n ) # Set to \"propensity\" to make sure converts frequency scores into rank values\n else:\n network_prob = np.full(network_num.shape, 0)\n\n if prop_or_freq == 'propensity':\n propensity_array = np.array([copy.deepcopy(network_num),\n copy.deepcopy(network_prob)])\n elif prop_or_freq == 'frequency':\n frequency_array = np.array([copy.deepcopy(network_num),\n copy.deepcopy(network_prob)])\n\n network_fitness_scores = OrderedDict()\n for index_prop, network_num in np.ndenumerate(propensity_array[0]):\n index_prop = index_prop[0]\n index_freq = np.where(frequency_array[0] == network_num)[0][0]\n\n propensity = float(propensity_array[1][index_prop])\n frequency = float(frequency_array[1][index_freq])\n\n probability = ( (self.propensity_weight*propensity)\n + ((1-self.propensity_weight)*frequency))\n network_fitness_scores[network_num] = probability\n\n return network_fitness_scores", "def melt_cluster_rep_matrix(df, name=\"count\"):\n return df.T.reset_index().melt(id_vars=[\"cluster\", \"rep\"], var_name=\"FBgn\", value_name=name)", "def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_score(tot_res - 
int(r.get_rank()) + 1)\n print r.get_str()", "def sim_ranks(query, database):\n distance = compute_sim(query, database)\n return np.argsort(-distance, axis=0)" ]
[ "0.5437983", "0.5321464", "0.48331884", "0.47549316", "0.46927905", "0.46685165", "0.46200693", "0.45880383", "0.4580909", "0.4577722", "0.4532358", "0.45247898", "0.44939372", "0.44839427", "0.44658518", "0.446214", "0.44235912", "0.44155613", "0.44132406", "0.44088876", "0.44001153", "0.4392279", "0.4389924", "0.43845078", "0.43784758", "0.43559784", "0.4336315", "0.43342414", "0.43272674", "0.4322365" ]
0.6814282
0
Distributes the values in the phones column based on the identifier given in parentheses into either Home, Mobile, or Work.
def split_phones(df, column): df['Work'] = df[column].str.extract(r'(...-...-....)\(W\)', expand=True) df['Mobile'] = df[column].str.extract(r'(...-...-....)\(M\)', expand=True) df['Mobile 2'] = df[column].str.extract(r'...-...-....\(M\).*?(...-...-....)\(M\)', expand=True) df['Mobile 3'] = df[column].str.extract(r'...-...-....\(M\).*?...-...-....\(M\).*?(...-...-....)\(M\)', expand=True) df['Home'] = df[column].str.extract(r'(...-...-....)\(H\)', expand=True) df['Mobile_'] = df[column].str.extract(r'(...-...-....)\(C\)', expand=True) df['Mobile 2_'] = df[column].str.extract(r'...-...-....\(C\).*?(...-...-....)\(C\)', expand=True) df['Mobile 3_'] = df[column].str.extract(r'...-...-....\(C\).*?...-...-....\(C\).*?(...-...-....)\(C\)', expand=True) df['Mobile'] = df['Mobile'].combine_first(df['Mobile_']) df['Mobile 2'] = df['Mobile 2'].combine_first(df['Mobile 2_']) df['Mobile 3'] = df['Mobile 3'].combine_first(df['Mobile 3_']) df.drop([column, 'Mobile_', 'Mobile 2_', 'Mobile 3_'], axis=1, inplace=True) df = remove_non_numeric(df, ['Mobile', 'Mobile 2', 'Mobile 3', 'Work', 'Home']) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_phone(phone, phone_mapping):\n results = []\n for iphone in re.split(',|;',phone):\n patterns = phone_pattern_re.search(iphone)\n if patterns:\n numbers = patterns.groups()\n if numbers[0] == \"852\":\n results.append(re.compile(r'\\D?(\\d{0,4}?)\\D{0,2}(\\d{4})\\D?(\\d{4})$', iphone))\n elif numbers[0] in phone_mapping:\n results.append (\"+852\"+ \" \" + numbers[1] + numbers[2])\n return ';'.join(results)", "def normalize(phone):\n d = re.sub('\\D', '', phone)\n return '+7 (%s) %s-%s-%s' % (d[1:4], d[4:7], d[7:9], d[9:11])", "def phone_brands(df):\n brands = {}\n brands['others'] = 0\n # get unique devices\n uniq_phone_brands = df.device_brand_name.unique().tolist()\n\n for x in uniq_phone_brands:\n if df[df.device_brand_name==x].device_brand_name.count() > 300:\n brands[x] = df[df.device_brand_name==x].device_brand_name.count()\n else:\n brands['others'] = brands['others'] + df[df.device_brand_name==x].device_brand_name.count()\n \n return brands", "def phone_parse(neighbor):\n mgmt_ip = neighbor[mgmt_ip_s]\n hostname = neighbor[hostname_s].split('.')[0]\n if nxos:\n sysname = neighbor['sysname']\n if sysname != '':\n hostname = sysname\n if mgmt_ip == '':\n mgmt_ip = neighbor['interface_ip']\n l_intf = neighbor['local_port']\n intf = re.findall(r'.{2}', l_intf)[0] + re.findall(r'\\d.+', l_intf)[0]\n macreg = re.findall(r'.{4}', hostname.replace('SEP', ''))\n mac_address = f'{macreg[0]}.{macreg[1]}.{macreg[2]}'.lower()\n voice_vlan = 'None'\n software_version = neighbor[version_s].replace('.loads', '')\n platform = neighbor['platform']\n for switchport in switchports:\n if switchport['interface'] == intf:\n for mac_addr in mac_addrs:\n if mac_addr['vlan'] == switchport['voice_vlan']:\n voice_vlan = mac_addr['vlan']\n break\n break\n if platform.__contains__('Cisco IP Phone'):\n platform = neighbor['platform'].replace('Cisco IP Phone ', '')\n else:\n platform = neighbor['platform']\n phone = {\n 'hostname': hostname,\n 'neighbor': {\n 'hostname': session.hostname,\n 'ip_address': session.ip_address,\n 'remote_intf': l_intf\n },\n 'ip_address': mgmt_ip,\n 'mac_addr': mac_address,\n 'voice_vlan': voice_vlan,\n 'software_version': software_version,\n 'model': platform\n }\n self.phones.append(phone)", "def combine_phenotypes(df_ph_full, df_ph_perimeter):\n key_cols = ['well', 'tile', 'cell']\n\n val_cols = [\n \"dapi_gfp_nuclear_corr\",\n \"dapi_nuclear_int\",\n \"dapi_nuclear_max\",\n \"dapi_nuclear_median\",\n \"gfp_nuclear_int\",\n \"gfp_nuclear_max\",\n \"gfp_nuclear_mean\",\n \"gfp_nuclear_median\",\n \"x\",\n \"y\",\n \"dapi_gfp_cell_corr\",\n \"gfp_cell_mean\",\n \"gfp_cell_median\",\n \"gfp_cell_int\"\n ]\n \n df_ph_perimeter = (df_ph_perimeter\n .set_index(key_cols)[val_cols]\n .rename(columns=lambda x: x + '_perimeter'))\n \n return df_ph_full.join(df_ph_perimeter, on=key_cols)", "def extract_phone(self, response):\n\n telephones = response.xpath('//*/a[contains(@href,\"tel:\")]/text()').extract()\n if telephones:\n telephones = [phone.replace(\"Tel:\", \"\").replace(\"Phone:\", \"\").replace(\"Handynummer:\", \"\").strip() for phone\n in telephones if phone]\n return \",\".join(filter(None, telephones)).strip()\n else:\n telephones = response.xpath('//*/p[contains(text(),\"nummer\")]/text()').extract()\n if telephones:\n telephones = [phone.replace(\"Tel:\", \"\").replace(\"Phone:\", \"\").replace(\"Handynummer:\", \"\").strip() for\n phone in telephones if (not \"Firmanummer\" in phone)]\n return \",\".join(filter(None, telephones)).strip()\n return \"\"", "def 
test_list_common_area_phones(self):\n pass", "def scrape_phones(self) -> None:\n\n phone_model, phone_ram, phone_storage, phone_processor, phone_camera, phone_price = ([] for i in range(6))\n \n phones_url = self.get_phones_url()\n fetch_single_phone = self.get_single_phone()\n\n with concurrent.futures.ThreadPoolExecutor() as executor:\n results = executor.map(fetch_single_phone, phones_url)\n \n for result in results:\n phone_price.append(result[0])\n phone_model.append(result[1])\n phone_ram.append(result[2])\n phone_storage.append(result[3])\n phone_processor.append(result[4])\n phone_camera.append(result[5]) \n\n df = pd.DataFrame(\n data=zip(\n phone_price, \n phone_model, \n phone_ram, \n phone_storage, \n phone_processor, \n phone_camera),\n columns=['price', 'model', 'ram', 'storage', 'processor', 'camera']\n )\n\n df['brand'] = self.__brand\n df['condition'] = self.__condition_name\n\n return df.to_csv(f\"{self.__brand}_{self.__condition_name}_data.csv\", index=False)", "def telephone(value, arg=None):\n \n # Normalise a number\n value = value.replace(\" \", \"\").replace(\"-\", \"\")\n if value.startswith(\"0\"):\n value = \"+44\" + value[1:]\n normalised = value\n \n # Check if it's a number which is formatted in a special way\n if normalised in UNUSUAL_NUMBERS:\n value = UNUSUAL_NUMBERS[normalised]\n else:\n # Figure out how to format that number\n \n # Convert UK numbers into national format\n if value.startswith(\"+44\"):\n value = \"0\" + value[3:]\n \n # Now apply rules on how to split up area codes\n if value[:8] in ('01332050', '01382006'):\n # Direct dial only\n value = value[:5] + \" \" + value[5:]\n elif value[:7] in ('0141005', '0117101') or value[:6] in ('011800',):\n # Direct dial only\n value = value[:4] + \" \" + value[4:7] + \" \" + value[7:]\n elif value[:7] in ('0200003',):\n # Direct dial only\n value = value[:3] + \" \" + value[3:7] + \" \" + value[7:]\n elif value.startswith('01'):\n if value[2] == '1' or value[3] == '1':\n # 4 digit area codes\n area_code = value[:4]\n local_part = value[4:7] + \" \" + value[7:]\n elif value[:6] in (\n '013873', # Langholm\n '015242', # Hornby\n '015394', # Hawkshead\n '015395', # Grange-over-Sands\n '015396', # Sedbergh\n '016973', # Wigton\n '016974', # Raughton Head\n '016977', # Brampton\n '017683', # Appleby\n '017684', # Pooley Bridge\n '017687', # Keswick\n '019467', # Gosforth\n ):\n # 6 digit area codes\n area_code = value[:4] + \" \" + value[4:6]\n local_part = value[6:]\n else:\n # 5 digit\n area_code = value[:5]\n local_part = value[5:]\n \n value = \"(%s) %s\" % (area_code, local_part)\n \n elif value.startswith('02'):\n # 3 digit area codes\n value = \"(%s) %s %s\" % (value[:3], value[3:7], value[7:])\n \n elif value.startswith('0500') or value.startswith('0800'):\n # direct dial - 4 digit prefix, short following\n value = \"%s %s\" % (value[:4], value[4:])\n \n elif value.startswith('03') or value.startswith('08') or value.startswith('09'):\n # direct dial - 4 digit prefix\n value = \"%s %s %s\" % (value[:4], value[4:7], value[7:])\n \n elif value.startswith('05') or value.startswith('070'):\n # direct dial - 3 digit prefix\n value = \"%s %s %s\" % (value[:3], value[3:7], value[7:])\n \n elif value.startswith('07'):\n # direct dial - 5 digit prefix, short following\n value = \"%s %s\" % (value[:5], value[5:])\n\n # Now apply University rules:\n if value[:10] in ('(01865) 27', '(01865) 28', '(01865) 43', '(01865) 61'):\n # Oxford - list of internal number prefixes here:\n # 
http://www.oucs.ox.ac.uk/telecom/directories/intdiraccess.xml\n value = \"(01865 \" + value[8] + \")\" + value[9:]\n\n if arg == 'nolink':\n return value\n else:\n return mark_safe('<a href=\"tel:%s\">%s</a>' % (normalised, value))", "def tidy_telephone(telephone):\n junk = ['none', 'none1', 'na', 'n/a', 'same', 'yes', 'cell', 'offsite']\n telephone = telephone.replace('xxx-xxx-xxxx', '')\n telephone = telephone.replace('ext', ' x')\n telephone = telephone.replace(' cell', '')\n telephone = telephone.replace('\"', '')\n telephone = telephone.replace('%', '')\n if telephone in junk:\n return ''\n else:\n return telephone", "def get_phone_data(page):\n phone_data = dict()\n soup = BeautifulSoup(page.content, features='html.parser')\n phone_name = soup.find('h1', class_=\"specs-phone-name-title\").text\n logger.info(f'Extract {phone_name} data from {page.url}')\n phone_data[phone_name] = dict()\n for table in soup.find('div', id='specs-list').find_all('table'):\n title = table.find('th').text\n logger.debug(f'Extract {phone_name}: {title}')\n phone_data[phone_name][title] = dict()\n for sub_table in table.find_all('tr'):\n if sub_table:\n key = sub_table.find('td', class_='ttl')\n val = sub_table.find('td', class_='nfo')\n if key and key.text != NON_BREAK_SPACE:\n val = _get_table_val(val)\n phone_data[phone_name][title][key.text.replace(NON_BREAK_SPACE, ' ')] = val\n\n else:\n if val:\n val = _get_table_val(val)\n try:\n phone_data[phone_name][title]['other'].append(val)\n except KeyError:\n phone_data[phone_name][title]['other'] = []\n phone_data[phone_name][title]['other'].append(val)\n\n return phone_data", "def concat_compartment(df):\n if 'UrbanRural' in df:\n df['Compartment'] = df['Compartment'] + '/' + df['UrbanRural']\n if 'cmpt_rh' in df:\n df['Compartment'] = df['Compartment'] + '/' + df['cmpt_rh']\n df['Compartment'] = df['Compartment'].str.replace('/unspecified','')\n return df", "def ismobile(number):\n if number[0] in ['7', '8', '9']:\n return True\n return False", "def map_to_homo_nid(self, ids, ntype):\n ...", "def map_to_homo_nid(self, ids, ntype):\n ...", "def expand_phone_details(self, phone_details):\n summary = {}\n result = {}\n for item in phone_details:\n key = item['data']['systemName'] + ': ' + item['data']['systemVersion']\n summary[key] = summary.get(key, 0) + 1\n result[item['_id']] = key\n return summary, result", "def __add_homes(self):\n for home in self.__positions_of_homes:\n self.__grid[home[0]][home[1]][\"humans\"] = math.floor(\n self.__number_of_humans / self.__number_of_homes\n )", "def phone(self, data_type):\n return sorted(self._phone_paths[data_type])", "def pair_devices(watch, phone):\n command = 'pair \"%s\" \"%s\"' % (watch.udid, phone.udid)\n pair_id = _run_command(command)\n\n # The pair ID has a new line at the end. 
Strip it when returning.\n return pair_id[:-1]", "def business_phones(self):\n if \"businessPhones\" in self._prop_dict:\n return self._prop_dict[\"businessPhones\"]\n else:\n return None", "def business_phones(self):\n if \"businessPhones\" in self._prop_dict:\n return self._prop_dict[\"businessPhones\"]\n else:\n return None", "def business_phones(self):\n if \"businessPhones\" in self._prop_dict:\n return self._prop_dict[\"businessPhones\"]\n else:\n return None", "def consolidate_mel(mel,delivery=False):\n c_MEL={}\n WP=00\n \n mel['Part No.']=mel['WP Activity/ Part No.']\n mel['Part No.']=mel['Part No.'].astype(str)\n\n #mel['Quantity']=mel['Quantity'].str.replace('m','',regex=False) \n\n mel['Quantity']=mel['Quantity'].fillna(value=0).astype(str) \n mel['Quantity']=mel['Quantity'].str.replace('meters','',regex=True) \n mel['Quantity']=mel['Quantity'].str.replace('m','',regex=False) \n\n\n mel['Quantity']=mel['Quantity'].astype('float')\n if delivery:\n for i, row in mel.iterrows():\n c_MEL[(str(row['Part No.'])+row['Delivery'])]={'Quantity':mel['Quantity'][(mel['Part No.'].astype(str)==str(row['Part No.'])) & (mel['Delivery']==row['Delivery'])].sum(),\n 'Part No.':row['Part No.'],\n 'Delivery':row['Delivery'],\n 'Equipment Description':row['Equipment Description'],\n 'WP':row['WP']}\n else:\n for i, row in mel.iterrows():\n c_MEL[(str(row['Part No.']))]={'Quantity':mel['Quantity'][mel['Part No.'].astype(str)==str(row['Part No.'])].sum(),\n 'Part No.':row['Part No.'],\n 'Equipment Description':row['Equipment Description']}\n \n c_MEL=pd.DataFrame(c_MEL).T \n return c_MEL", "def _get_hardware_id(cls, vendor_id, product_id):\n return \"%s%s\" % (vendor_id.ljust(8), product_id.ljust(16))", "def map_to_homo_nid(self, ids, ntype): # -> None:\n ...", "def updaterecord(phones,username,phonenum):\r\n if username in phones:\r\n phones[username] = phonenum\r\n else:\r\n raise ValueError(\"This username are not exist\")", "def output_to_spreadsheet(routers_switches, phones, aps, others, failed_devices, file_location):\n # Creates Excel workbook and worksheets\n wb = Workbook()\n routers_switches_ws = wb.active\n routers_switches_ws.title = 'Routers_Switches'\n phones_ws = wb.create_sheet('Phones')\n aps_ws = wb.create_sheet('APs')\n others_ws = wb.create_sheet('Others')\n failed_ws = wb.create_sheet('Failed')\n\n alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\n # Checks if phones contain directory number and description from CUCM export merge\n if any('description' in phone for phone in phones):\n phone_string = 'CUCMPhone'\n else:\n phone_string = 'Phone'\n\n neighbor_count = 1\n # Sets 'neighbor_count' to length of longest neighbor list in routers_switches dictionaries\n for rt_sw in routers_switches:\n if rt_sw['connection_attempt'] == 'Failed':\n if len(rt_sw['neighbors']) > neighbor_count:\n neighbor_count = len(rt_sw['neighbors'])\n\n def write_header(worksheet, device_type):\n \"\"\"\n :param device_type: 'RouterSwitch', 'Phone', 'CUCMPhone', 'WAP', 'Other', or 'Failed'\n :param worksheet: Device worksheet\n :return: int(header_length), list(header)\n \"\"\"\n header = ['Hostname', 'IP Address', 'Model', 'Software Version']\n if device_type == 'RouterSwitch':\n header += ['Serial', 'Connection Type', 'ROMMON', 'Connection Attempt', 'Discovery Status']\n for n in range(1, neighbor_count + 1):\n header += [f'Neighbor {n} Hostname', f'Neighbor {n} IP Address', f'Local Interface to Neighbor {n}',\n f'Neighbor {n} Interface']\n elif device_type == 'Phone' or device_type == 'CUCMPhone':\n header += 
['Voice VLAN', 'MAC Address', 'Switch Hostname', 'Switch IP Address', 'Switchport']\n if device_type == 'CUCMPhone':\n header += ['Description', 'Main Directory Number']\n elif device_type == 'WAP':\n header += ['Switch Hostname', 'Switch IP Address', 'Switchport']\n elif device_type == 'Other':\n header += ['Neighbor Hostname', 'Neighbor IP Address', 'Local Interface to Neighbor', 'Neighbor Interface']\n elif device_type == 'Failed':\n header = ['IP Address', 'Connection Type', 'Device Type', 'Connectivity', 'Authentication',\n 'Authorization', 'Discovery Status', 'Connection Exception']\n worksheet.append(header)\n return len(header), header\n\n def write_to_sheet(device_list, worksheet, device_type):\n \"\"\"\n :param device_type: 'RouterSwitch', 'Phone', 'CUCMPhone', 'WAP', 'Other', or 'Failed'\n :param device_list: List of devices\n :param worksheet: Device worksheet\n :return: list(rows)\n \"\"\"\n rows = []\n for device in device_list:\n if device_type != 'Failed':\n row = [device['hostname'], device['ip_address'], device['model'], device['software_version']]\n if device_type == 'RouterSwitch':\n if 'serial' in device:\n serial = device['serial']\n connection_type = device['connection_type']\n rommon = device['rommon']\n else:\n serial = 'Unknown'\n connection_type = 'Unknown'\n rommon = 'Unknown'\n row += [serial, connection_type, rommon, device['connection_attempt'], device['discovery_status']]\n if device['connection_attempt'] == 'Failed':\n for neighbor in device['neighbors']:\n row += [neighbor['hostname'], neighbor['ip_address'], neighbor['local_intf'],\n neighbor['remote_intf']]\n if device_type == 'Phone' or device_type == 'CUCMPhone':\n neighbor = device['neighbor']\n row += [device['voice_vlan'], device['mac_addr'], neighbor['hostname'], neighbor['ip_address'],\n neighbor['remote_intf']]\n if 'description' in device:\n row += [device['description'], device['directory_number']]\n if device_type == 'WAP' or device_type == 'Other':\n neighbor = device['neighbor']\n row += [neighbor['hostname'], neighbor['ip_address'], neighbor['remote_intf']]\n if device_type == 'Other':\n row.append(neighbor['local_intf'])\n else:\n row = [device['ip_address'], device['connection_type'], device['device_type'], device['connectivity'],\n device['authentication'], device['authorization'], device['discovery_status'],\n device['exception']]\n worksheet.append(row)\n rows.append(row)\n return rows\n\n def complete_sheet(device_list, worksheet, device_type):\n \"\"\"Completes workbook sheet\"\"\"\n column_num = len(device_list) + 1\n header_out = write_header(worksheet, device_type)\n header = header_out[1]\n header_length = header_out[0]\n letter = header_length - 1\n if letter > 25:\n column_letter = f'{alphabet[int(letter / 26) - 1]}{alphabet[letter % 26]}'\n else:\n column_letter = alphabet[letter]\n bottom_right_cell = f'{column_letter}{column_num}'\n rows = write_to_sheet(device_list, worksheet, device_type)\n\n # Creates table if there is data in table\n if len(device_list) != 0:\n table = Table(displayName=device_type, ref=f'A1:{bottom_right_cell}')\n style = TableStyleInfo(name='TableStyleMedium9', showFirstColumn=False, showLastColumn=False,\n showRowStripes=True, showColumnStripes=True)\n table.tableStyleInfo = style\n worksheet.add_table(table)\n\n # Sets column widths\n all_data = [header]\n all_data += rows\n column_widths = []\n for row in all_data:\n for i, cell in enumerate(row):\n if len(column_widths) > i:\n if len(str(cell)) > column_widths[i]:\n column_widths[i] = 
len(str(cell))\n else:\n column_widths += [len(str(cell))]\n\n for i, column_width in enumerate(column_widths):\n if i > 25:\n l1 = f'{alphabet[int(i / 26) - 1]}{alphabet[i % 26]}'\n else:\n l1 = alphabet[i]\n worksheet.column_dimensions[l1].width = column_width + 3\n\n complete_sheet(routers_switches, routers_switches_ws, 'RouterSwitch')\n complete_sheet(phones, phones_ws, phone_string)\n complete_sheet(aps, aps_ws, 'WAP')\n complete_sheet(others, others_ws, 'Other')\n complete_sheet(failed_devices, failed_ws, 'Failed')\n\n # Saves workbook\n date_time = datetime.now().strftime('%m_%d_%Y-%H_%M_%S')\n wb.save(f'{file_location}/network_inventory-{date_time}-.xlsx')", "def test_address__normalize_phone_number__6():\n assert '+421234567891' == normalize_phone_number(\n '0042-1234/5678-91', '+49')", "def get_experiment_phn_info():\n phone_list = ['##', 'aa', 'ae', 'ao', 'aw', 'ax', 'ay', 'bb', 'br',\n 'ch', 'dd', 'dh', 'eh', 'er', 'ey', 'ff', 'gg', 'hh', 'ih',\n 'iy', 'jh', 'kk', 'll', 'mm', 'ng', 'nn', 'ow', 'oy', 'pp',\n 'rr', 'sh', 'sp', 'ss', 'th', 'tt', 'uh', 'uw', 'vv', 'ww',\n 'yy', 'zh', 'zz']\n ph2id = {ph: i for i, ph in enumerate(phone_list)}\n id2ph = {i: ph for i, ph in enumerate(phone_list)}\n\n return phone_list, ph2id, id2ph", "def test_group_by_hardware_info(self):\n self._test_group_by('Hardware Info', [1, 1, 2, 1, 1])" ]
[ "0.557691", "0.48557055", "0.47899625", "0.45926788", "0.45099318", "0.44751787", "0.4453989", "0.44258246", "0.4379909", "0.43671274", "0.43414667", "0.4331737", "0.43276244", "0.42938775", "0.42938775", "0.42811003", "0.42746872", "0.42629614", "0.42539835", "0.42346013", "0.42346013", "0.42346013", "0.42281199", "0.4222738", "0.42103195", "0.4192845", "0.41862008", "0.41766828", "0.415772", "0.41551834" ]
0.6594883
0
Splits the comma separated values in the emails column into a maximum of 3 different columns.
def split_emails(df, column): df['Email'] = df[column].str.extract(r'(.*?@.*?\....),?', expand=True) df['Email 2'] = df[column].str.extract(r'.*@.*\....,\s?(.*@.*\....)', expand=True) df['Email 3'] = df[column].str.extract(r'.*@.*\....,\s?.*@.*\....,\s?(.*@.*\....)', expand=True) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_into_columns(s):\n\ts = re.sub(',,,', ',0,0,', s)\n\ts = re.sub(',,', ',0,', s)\n\treturn s.split(',')", "def _parse_emails(self, emails):\n return [e.strip() for e in emails.split(',')]", "def get_email_ids(self):\n if self.emails is None or self.emails == '':\n return []\n email_ids = self.emails.replace(' ', '')\n return email_ids.split(',')", "def split_email_addresses(line):\n if line:\n addrs = line.split(',')\n addrs = frozenset(map(lambda x: x.strip(), addrs))\n else:\n addrs = None\n return addrs", "def separate_comma(s):\n return s.split(',')", "def multi_emails(max_emails: int = 2) -> st.SearchStrategy[str]:\n return st.lists(\n emails(),\n min_size=1,\n max_size=max_emails,\n ).map(lambda email: \";\".join(email))", "def split_by_comma(s):\n return s.strip().split(\",\")", "def _parse_to_emails(self, to_emails):\n tos = []\n if not isinstance(to_emails, (list, tuple)):\n to_emails = [to_emails]\n for email in to_emails:\n if isinstance(email, str):\n tos.append(self._generate_email(email))\n elif isinstance(email, dict):\n tos.append(self._generate_email(**email))\n else:\n raise ValueError('Invalid data format')\n return tos", "def write_to_csv(list_of_emails):\n import csv\n # use newline='' to prevent double-spaced rows\n with open('emails.csv', 'w', newline='') as outFile:\n outWriter = csv.writer(outFile)\n charNum = outWriter.writerow(['email'])\n for i in list_of_emails:\n charNum = outWriter.writerow([i])\n outFile.close()", "def clean_row(row,i):\n # convert string\n char_array = np.array(list(row))\n\n #insert entry dividers, then split by them\n div_ix = (\n np.array([6, 34, 48, 51, 54, 60, 64, 67, 72, 80, 86, 94, 100,\n 107, 112, 119, 125, 137, 141, 145, 156]),\n )\n char_array[div_ix] = ','\n new_csv_row = (''.join(char_array)).split(',')\n\n # remove excess whitespace surrounding data\n new_csv_row = np.array([entry.strip() for entry in new_csv_row])\n\n return new_csv_row", "def split_field_content(cls, string):\n if \",\" in string and not is_rfc1123_datetime(string):\n return [s.strip() for s in string.split(\",\")]\n else:\n return string", "def split_imeis(imeis):\n if imeis:\n return imeis[1:-1].split(',')\n else:\n return None", "def split_phones(df, column):\n\n df['Work'] = df[column].str.extract(r'(...-...-....)\\(W\\)', expand=True)\n df['Mobile'] = df[column].str.extract(r'(...-...-....)\\(M\\)', expand=True)\n df['Mobile 2'] = df[column].str.extract(r'...-...-....\\(M\\).*?(...-...-....)\\(M\\)', expand=True)\n df['Mobile 3'] = df[column].str.extract(r'...-...-....\\(M\\).*?...-...-....\\(M\\).*?(...-...-....)\\(M\\)', expand=True)\n df['Home'] = df[column].str.extract(r'(...-...-....)\\(H\\)', expand=True)\n df['Mobile_'] = df[column].str.extract(r'(...-...-....)\\(C\\)', expand=True)\n df['Mobile 2_'] = df[column].str.extract(r'...-...-....\\(C\\).*?(...-...-....)\\(C\\)', expand=True)\n df['Mobile 3_'] = df[column].str.extract(r'...-...-....\\(C\\).*?...-...-....\\(C\\).*?(...-...-....)\\(C\\)', expand=True)\n df['Mobile'] = df['Mobile'].combine_first(df['Mobile_'])\n df['Mobile 2'] = df['Mobile 2'].combine_first(df['Mobile 2_'])\n df['Mobile 3'] = df['Mobile 3'].combine_first(df['Mobile 3_'])\n df.drop([column, 'Mobile_', 'Mobile 2_', 'Mobile 3_'], axis=1, inplace=True)\n df = remove_non_numeric(df, ['Mobile', 'Mobile 2', 'Mobile 3', 'Work', 'Home'])\n return df", "def split_values(self, value):\n if value:\n return [s.strip() for s in value.split(',')]\n else:\n return []", "def split_columns(l):\n return [l[:3], l[3:7], l[7:12], 
l[12:16], l[16:]]", "def email_extract(self, email_series: pd.Series):\n return (email_series\n .str.extract(self.email_extract_re, expand=False)\n .map(lambda domain: '' if domain in self.blacklist_domains else domain)\n .pipe(self._clean_domain_punctuation))", "def process_domains(self, save_path=None):\r\n emails = self.db.read_sql(\"SELECT * FROM emails\")\r\n emails.loc[:, email2] = emails.loc[:, email].apply(self.parse_email) \r\n emails.loc[:, DOMAIN] = emails.loc[:, email2].apply(self.get_domain)\r\n emails.drop_duplicates([DOMAIN], inplace=True)\r\n if save_path:\r\n emails.to_csv(save_path, index=False)\r\n emails.loc[:,DOMAIN].to_sql(DOMAINS, self.db.con, if_exists='append', index=False)", "def csv_to_field_Urls(entity, value):\n if value is None or value == '':\n return\n splitter = re.compile(url_splitter)\n entity.string = splitter.split(value)", "def tidy_split(df, column='Members', sep=', '):\n\n indexes = []\n new_values = []\n for i, presplit in enumerate(df[column].astype(str)):\n for value in presplit.split(sep):\n indexes.append(i)\n new_values.append(value)\n new_df = df.iloc[indexes, :].copy() # the .copy() Prevents a warning\n new_df[column] = new_values\n df = new_df.reset_index(drop=True)\n return df", "def clean_email_column(df: pd.DataFrame) -> pd.DataFrame:\n try:\n df[\"Email\"] = df[\"Email\"].apply(_clean_email_strings)\n return df\n except ValueError:\n print(\"'Email' column not found, please check the input file structures.\")", "def getEmail(self, data):\r\n\t\tprint('test')\r\n\t\t# Empty array to hold unique emails\r\n\t\tno_dp_email = []\r\n\r\n\t\t# Loop through each row in the dataframe...\r\n\t\tfor row in data.itertuples():\r\n\t\t\tprint('test')\r\n\r\n\t\t\t# Parse through the row's keywords string for emails...\r\n\t\t\temails = re.findall(\"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}\", row.keywords)\r\n\t\t\tprint(emails)\r\n\t\t\tprint('test')\r\n\r\n\t\t\t# For each email in the array...\r\n\t\t\tfor email in emails:\r\n\t\t\t\tprint('test')\r\n\r\n\t\t\t\temail = str(email)\r\n\r\n\t\t\t\t# Append this email onto the array if it is not a repeat\r\n\t\t\t\tif email not in no_dp_email:\r\n\t\t\t\t\tprint('test')\r\n\r\n\t\t\t\t\tno_dp_email.append(email)\r\n\t\t\r\n\t\t# return array of unique emails\r\n\t\treturn no_dp_email", "def mail_address(mail_addr_list):\n if mail_addr_list:\n mail_addr_list = mail_addr_list.replace(\" \", \"\")\n if \",\" in mail_addr_list:\n mail_addr_list = mail_addr_list.replace(\",\", \";\")\n mail_addr_list = mail_addr_list.split(\";\")\n for mail_addr in mail_addr_list:\n if len(mail_addr.split(\"@\")) != 2:\n raise ArgumentTypeError(\"Invalid mail address: %s\" % mail_addr)\n return mail_addr_list\n else:\n raise ArgumentTypeError(\"mail address is not specified\")", "def FE_split_one_field_into_many(df_in, field, splitter, filler, new_names_list='', add_count_field=False):\r\n df_field = df_in[field].values\r\n df = copy.deepcopy(df_in)\r\n ### First copy whatever is in that field so we can save it for later ###\r\n df[field].fillna(filler, inplace=True)\r\n if add_count_field:\r\n ### there will be one extra field created when we count the number of contents in each field ###\r\n max_things = df[field].map(lambda x: len(x.split(splitter))).max() + 1\r\n else:\r\n max_things = df[field].map(lambda x: len(x.split(splitter))).max()\r\n if len(new_names_list) == 0:\r\n print(' Max. columns created by splitting %s field is %d.' 
%(\r\n field,max_things))\r\n else:\r\n if not max_things == len(new_names_list):\r\n print(\"\"\" Max. columns created by splitting %s field is %d but you have given %d \r\n variable names only. Selecting first %d\"\"\" %(\r\n field,max_things,len(new_names_list),len(new_names_list)))\r\n ### This creates a new field that counts the number of things that are in that field.\r\n if add_count_field:\r\n #### this counts the number of contents after splitting each row which varies. Hence it helps.\r\n num_products_viewed = 'Content_Count_in_'+field\r\n df[num_products_viewed] = df[field].map(lambda x: len(x.split(splitter))).values\r\n ### Clean up the field such that it has the right number of split chars otherwise add to it\r\n ### This fills up the field with empty strings between each splitter. You can't do much about it.\r\n #### Leave this as it is. It is not something you can do right now. It works.\r\n fill_string = splitter + filler\r\n df[field] = df[field].map(lambda x: x+fill_string*(max_things-len(x.split(splitter))) if len(\r\n x.split(splitter)) < max_things else x)\r\n ###### Now you create new fields by split the one large field ########\r\n if isinstance(new_names_list, str):\r\n if new_names_list == '':\r\n new_names_list = [field+'_'+str(i) for i in range(1,max_things+1)]\r\n else:\r\n new_names_list = [new_names_list]\r\n ### First fill empty spaces or NaNs with filler ###\r\n df.loc[df[field] == splitter, field] = filler\r\n for i in range(len(new_names_list)):\r\n try:\r\n df[new_names_list[i]] = df[field].map(lambda x: x.split(splitter)[i]\r\n if splitter in x else filler)\r\n except:\r\n df[new_names_list[i]] = filler\r\n continue\r\n ### there is really nothing you can do to fill up since they are filled with empty strings.\r\n #### Leave this as it is. It is not something you can do right now. 
It works.\r\n df[field] = df_field\r\n return df, new_names_list", "def split_column(df,col_name,reg_ex=',',keep=False):\n # https://stackoverflow.com/a/51680292/5847441\n df = df.select(col_name,posexplode(split(col_name,reg_ex)).alias('pos','val'))\\\n .select(col_name,concat(lit(col_name),col('pos').cast('string')).alias('name'),'val')\\\n .groupBy(col_name).pivot('name').agg(first('val'))\n if keep:\n return df\n else:\n return df.drop(col_name)", "def from_csv_line(line):\r\n return line.strip().split(',')", "def add_recipients(df, all_emails):\n user = df[\"sender\"].iloc[0] # ID of the user\n emails = all_emails[user]\n df[\"emails\"] = str(list(emails))\n df[\"emails\"] = df[\"emails\"].map(literal_eval)\n return df", "def csv_to_field_CampaignLanguages(entity, value):\n if value is None or value == '':\n return\n splitter = re.compile(r';')\n entity.string = splitter.split(value)", "def validate_string_of_email_addresses(string_of_email_addresses):\n\n # Remove any extra whitespaces, trailing commas, new lines, etc.\n cleaned_string_of_email_addresses = string_of_email_addresses.replace(\" \", \"\").strip(\",\").strip()\n\n # Split on commas, remove duplicates with set(), then convert back to a list.\n email_addresses = list(set(cleaned_string_of_email_addresses.split(\",\")))\n\n for email_address in email_addresses:\n if check_email_address_validity(email_address) is False:\n raise ValidationError(\n f\"Invalid email address found in string, enter comma delimited email addresses: {string_of_email_addresses}\"\n )\n\n # List of email addresses looks good, return a comma separated string of email addresses.\n valid_emails_string = \",\".join(email_addresses)\n\n return valid_emails_string", "def email_slicer():\n\n # email = '[email protected]'\n # t_l_d_s = ['com', 'net', 'co.ug', 'us', 'tech', 'info', 'biz', '']\n email = input('Enter your full email address: ')\n\n # Getting names\n username = email[:email.index('@')]\n f_name, l_name = username.split('.')\n\n # Getting domain\n domain = email[email.index('@') + 1:]\n d_name, t_l_d = domain.split('.')\n display = f'You first name is {f_name.capitalize()} and last {l_name.capitalize()}, ' \\\n f'registered on {d_name.capitalize()}\\'s domain with .\"{t_l_d.upper()}\" as the TLD!'\n # print(type(f_name))\n return display", "def split_device_list(devices: str) -> list:\n return devices.split(\",\")" ]
[ "0.67024225", "0.65343946", "0.5943639", "0.59189403", "0.55012566", "0.5432298", "0.5371343", "0.52167886", "0.5202834", "0.5180676", "0.5168435", "0.5163331", "0.5148711", "0.51152694", "0.50926095", "0.50753117", "0.5074737", "0.5072455", "0.506463", "0.50392157", "0.5035906", "0.5035493", "0.5007338", "0.49914595", "0.4987786", "0.4976711", "0.4959177", "0.49568915", "0.4924624", "0.49119526" ]
0.7557674
0
Removes all leading and trailing whitespace. Replaces all newlines, carriage returns, and invisible tabbreaks with a space. \n If a column isn't specified, it acts on the entire dataframe.
def strip_whitespace(df, column=None): if column is None: for x in df.columns: if df[x].dtypes == object: df[x] = pd.core.strings.str_strip(df[x]) df[x] = df[x].str.replace('\n', '') df[x] = df[x].str.replace(r'\r', ' ', regex=True) df[x] = df[x].str.replace(r'\n', ' ', regex=True) df[x] = df[x].str.replace(r'\v', ' ', regex=True) elif isinstance(column, list): for x in column: if df[x].dtypes == object: df[x] = pd.core.strings.str_strip(df[x]) df[x] = df[x].str.replace(r'\r', ' ', regex=True) df[x] = df[x].str.replace(r'\n', ' ', regex=True) df[x] = df[x].str.replace(r'\v', ' ', regex=True) else: if df[column].dtypes == object: df[column] = pd.core.strings.str_strip(df[column]) df[column] = df[column].str.replace(r'\r', ' ', regex=True) df[column] = df[column].str.replace(r'\n', ' ', regex=True) df[column] = df[column].str.replace(r'\v', ' ', regex=True) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_df(self, df, column_name):\r\n \r\n df[column_name] = df[column_name].fillna('').str.replace('\\n', ' ')\r\n return df", "def shrink_whitespace(data: pd.Series) -> pd.Series:\n data = data.replace(r'\\s+', value=' ', regex=True)\n return data.str.strip()", "def strip_columns(df: DataFrame) -> DataFrame:\r\n return df.apply(lambda x: x.str.strip() if x.dtype == 'object' else x)", "def clean_whitespace(text):\n return text\n #return re.sub(r'\\r\\n|\\n', \"\\t\", text)", "def trim_all_columns(df):\n trim_strings = lambda x: x.strip() if isinstance(x, str) else x\n return df.applymap(trim_strings)", "def NormalizeWhitespace (text, preserve=False, replace=False, collapse=False):\n if preserve:\n return text\n text = __TabCRLF_re.sub(' ', text)\n if replace:\n return text\n if collapse:\n return __MultiSpace_re.sub(' ', text).strip()\n # pyxb not imported here; could be.\n raise Exception('NormalizeWhitespace: No normalization specified')", "def dataCleaner(dataframe):\r\n dataframe = dataframe.dropna(how='all')\r\n for col in dataframe:\r\n dataframe[col] = dataframe[col].apply(lambda x : np.nan() if str(x).isspace() else x)\r\n dataframe[col] = dataframe[col].fillna(dataframe[col].mean())\r\n return dataframe", "def clean_lines(df_column):\n \n clean_lines = []\n # pattern for html tags\n tag_match = re.compile('<.*?>')\n # patterm for website\n website_match = re.compile('https?:\\/\\/.*[\\r\\n]*')\n # pattern for tex\n tex_match = re.compile('\\$\\$?.+?\\$\\$?')\n \n for line in df_column:\n s = re.sub(tag_match, '', line)\n s = re.sub(website_match, '[website]', s)\n s = re.sub(tex_match, '[tex]', s)\n # replace extra whitespace with spaces\n for x in string.whitespace:\n s = s.replace(x, ' ')\n clean_lines.append(s)\n \n return clean_lines", "def _tab_newline_replace( self, fromlines, tolines ):\n\t\tdef expand_tabs( line ):\n\t\t\t# hide real spaces\n\t\t\tline = line.replace( ' ', '\\0' )\n\t\t\t# expand tabs into spaces\n\t\t\tline = line.expandtabs( self._tabsize )\n\t\t\t# relace spaces from expanded tabs back into tab characters\n\t\t\t# (we'll replace them with markup after we do differencing)\n\t\t\tline = line.replace( ' ', '\\t' )\n\t\t\treturn line.replace( '\\0', ' ' ).rstrip( '\\n' )\n\t\tfromlines = [expand_tabs( line ) for line in fromlines]\n\t\ttolines = [expand_tabs( line ) for line in tolines]\n\t\treturn fromlines, tolines", "def clean_all(text):\n # anticipate Null values in columns that will be cleaned\n if text is not None and type(text) is not float:\n text = \"\".join(text)\n no_ucode = clean_unicode(text)\n no_space = \"\".join(clean_whitespaces(no_ucode.strip()))\n text = no_space.strip()\n\n return text", "def fix_whitespace(lines: Sequence[str], eol: str, ends_with_eol: bool) -> str:\n lines = _strip(lines)\n lines = [i.expandtabs(4) for i in lines]\n result = eol.join(lines)\n if ends_with_eol:\n result += eol\n return result", "def reduceBlank(text, keepNewLines=False):\n if text is None:\n return None\n text = text.strip()\n if not keepNewLines:\n return re.sub(r'\\s+', ' ', text)\n else:\n text = re.sub(r'\\r', '\\n', text)\n text = re.sub(r'\\s*\\n+\\s*', '\\n', text)\n text = re.sub(r'[ \\t\\f\\v]+', ' ', text)\n return text", "def normalize_whitespace(text):\n return RE_NONBREAKING_SPACE.sub(\" \", RE_LINEBREAK.sub(r\"\\n\", text)).strip()", "def shrink_space(data):\n # remove leading and trailing spaces\n data = data.strip()\n # collapse multiple lines to one single line\n data = re.sub(\"\\n+\",\"\\n\",data)\n\n return data", "def 
destringify_df(df: pd.DataFrame, separator=\"\\t\"):\n df = split_stuff(df, separator=separator).T\n df = dataframe_stuff(df)\n return df", "def removeMultipleSpaces(self) -> None:\n self.text = re.sub('\\s+', ' ', self.text)", "def clean_crlf(fpath):\n sub = path.basename(path.dirname(fpath))\n \n with open(fpath, 'rb') as f:\n raw_content = f.read()\n lfnull_content = raw_content.replace(b'\\r',b'')\n \n outpath = path.join('..','sourcedata','ds3','sub-'+sub,'sub-'+sub+'_task-all_beh.tsv')\n with open(outpath, 'w') as f:\n f.write(lfnull_content.decode(\"utf-8\"))\n\n return(pd.read_csv(outpath, delimiter='\\t'))", "def remove_spaces_from_columns_names(file_path):\n cprint(f\"### Function Name:-> {inspect.stack()[0][3]} ###\", 'yellow', 'on_grey', attrs=['bold'])\n try:\n path_obj = Path(file_path)\n df = get_df_from_data_file(file_path)\n df.columns = df.columns.str.strip()\n delete_data_file(file_path)\n if path_obj.suffix == \".xlsx\":\n df.to_excel(path_obj.as_posix(), index=False)\n elif path_obj.suffix == \".csv\":\n df.to_csv(path_obj.as_posix(), index=False, sep=',')\n except Exception as ex:\n cprint(traceback.format_exc(), 'red')\n log_exception(traceback.format_exc())", "def prepare_input_df(df: DataFrame) -> DataFrame:\r\n df = df.fillna('') # Fill np.nan values with blanks (\"\").\r\n df = to_upper(df) # Force case to UPPER for all columns.\r\n df = strip_columns(df) # Remove trailing whitespace.\r\n return df", "def test_leading_trailing_whitespaces_in_fields_are_stripped(self):\n self.df[\"new_concat_field_ae\"] = concat_fieldvalues(self.df, ['a', 'e'])\n\n expected_result_ae = pd.DataFrame({'new_concat_field_ae': ['x y12', 'y', 'x']})\n assert_series_equal(self.df[\"new_concat_field_ae\"], expected_result_ae['new_concat_field_ae'])", "def clean_hanging_newline(t):\n if t and t[-1] == \"\\n\":\n return t[:-1]\n return t", "def replace_newline_with_space(text):\n return re.sub(\"[\\n\\r]\", \" \", text)", "def removeSingleChars(self) -> None:\n self.text = re.sub('\\s[^\\n\\s]\\s', ' ', self.text)", "def strip_space():\n pass", "def clean(df):", "def data_cleaning(df):\n df = df.dropna(how='all')\n \n return df", "def remove_leading_blanks(self, sentence):\n pass", "def to_multi_lined_df(df: pd.DataFrame):\n if len(df) == 0:\n return df\n broken_dfs = []\n for col_index in range(df.shape[1]):\n column_series = df.iloc[:, col_index]\n column_series = column_series.apply(str)\n # \"AttributeError: Can only use .str accessor with string values!\" here, if we do not have strings everywhere\n multi_lined_column_series = column_series.str.split(\"\\n\", expand=True).stack()\n broken_dfs.append(multi_lined_column_series)\n # If without keys, column names in the concat become 0, 1\n multi_lined_df = pd.concat(broken_dfs, axis=1, keys=df.columns)\n multi_lined_df = multi_lined_df.fillna(\"\")\n # Keep indices intentionally\n return multi_lined_df", "def preProcessText(col):\n reponct = string.punctuation.replace(\"?\",\"\").replace(\"/\",\"\")\n rehtml = re.compile('<.*>')\n extr = col.str.strip()\n extr = extr.str.replace(rehtml, '', regex=True)\n extr = extr.str.translate(str.maketrans('','',reponct))\n extr = extr.str.replace('[^0-9a-zA-Z?/ ]+', ' ', regex=True)\n extr = extr.str.replace('\\s+', ' ', regex=True)\n extr = extr.str.lower()\n return extr", "def remove_blank_lines(text):\n out_text = \"\"\n blank = True\n for line in text.splitlines(True):\n if line.isspace():\n if not blank:\n blank = True\n out_text = out_text + line\n else:\n blank = False\n out_text = out_text + 
line\n return out_text" ]
[ "0.7356934", "0.6805515", "0.6794057", "0.64226145", "0.64139557", "0.6157395", "0.6111862", "0.6067155", "0.5934019", "0.59189373", "0.5878997", "0.58672464", "0.5864388", "0.5830358", "0.58267504", "0.58051336", "0.5795834", "0.5767993", "0.57601017", "0.573922", "0.57248724", "0.5714557", "0.5684751", "0.5670857", "0.5667166", "0.56551516", "0.56296766", "0.5629078", "0.56225836", "0.55974126" ]
0.7584346
0
Splits a column of comma separated values into their own rows with values identical to the original.
def tidy_split(df, column='Members', sep=', '): indexes = [] new_values = [] for i, presplit in enumerate(df[column].astype(str)): for value in presplit.split(sep): indexes.append(i) new_values.append(value) new_df = df.iloc[indexes, :].copy() # the .copy() Prevents a warning new_df[column] = new_values df = new_df.reset_index(drop=True) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_column(df,col_name,reg_ex=',',keep=False):\n # https://stackoverflow.com/a/51680292/5847441\n df = df.select(col_name,posexplode(split(col_name,reg_ex)).alias('pos','val'))\\\n .select(col_name,concat(lit(col_name),col('pos').cast('string')).alias('name'),'val')\\\n .groupBy(col_name).pivot('name').agg(first('val'))\n if keep:\n return df\n else:\n return df.drop(col_name)", "def split_into_columns(s):\n\ts = re.sub(',,,', ',0,0,', s)\n\ts = re.sub(',,', ',0,', s)\n\treturn s.split(',')", "def tidy_split(df, column, sep='|', keep=False):\r\n indexes = list()\r\n new_values = list()\r\n df = df.dropna(subset=[column])\r\n for i, presplit in enumerate(df[column].astype(str)):\r\n values = presplit.split(sep)\r\n if keep and len(values) > 1:\r\n indexes.append(i)\r\n new_values.append(presplit)\r\n for value in values:\r\n indexes.append(i)\r\n new_values.append(value)\r\n new_df = df.iloc[indexes, :].copy()\r\n new_df[column] = new_values\r\n return new_df", "def from_csv_line(line):\r\n return line.strip().split(',')", "def clean_row(row,i):\n # convert string\n char_array = np.array(list(row))\n\n #insert entry dividers, then split by them\n div_ix = (\n np.array([6, 34, 48, 51, 54, 60, 64, 67, 72, 80, 86, 94, 100,\n 107, 112, 119, 125, 137, 141, 145, 156]),\n )\n char_array[div_ix] = ','\n new_csv_row = (''.join(char_array)).split(',')\n\n # remove excess whitespace surrounding data\n new_csv_row = np.array([entry.strip() for entry in new_csv_row])\n\n return new_csv_row", "def split_by_comma(s):\n return s.strip().split(\",\")", "def split_values(self, value):\n if value:\n return [s.strip() for s in value.split(',')]\n else:\n return []", "def split_line(line):\n if ',' in line:\n return [a.strip() for a in line.split(',')]\n return line.split()", "def parse_normalized(line):\n return line.strip().split(',')", "def convert_line(line):\n line = line.strip().replace(\"]\", \"\").replace(\"[\", \"\")\n return line.split(\",\")", "def coerce_strings(df, col):\r\n for i,j in enumerate(df[col]):\r\n if type(j) == str:\r\n j = float(j.replace(',', ''))\r\n df[col].iloc[i] = j\r\n\r\n return df", "def separate_comma(s):\n return s.split(',')", "def split_line(line: str) -> [str]:\n return line.strip().split(',')", "def column_to_list(column):\n column = column.tolist()\n column_string = \"\"\n for i in range(len(column)):\n current_row = column[i]\n try:\n column_string = column_string + current_row + \" \"\n except:\n pass\n column_list = column_string.split()\n return column_list", "def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert", "def split_values(value):\n try:\n result = dtype([conv(x) for x in value.split(',')])\n except:\n raise argparse.ArgumentTypeError('Expect comma-separated tuple')\n\n if num_items and len(result) != num_items:\n raise argparse.ArgumentTypeError('Expected {} items'.format(num_items))\n\n return result", "def normalize(column):\n value_set = set(column)\n unique_count = len(value_set)\n if unique_count == 1:\n # skip everything in this column. 
\n return []\n elif unique_count == 2:\n zero = list(value_set)[0]\n one = list(value_set)[1]\n normalized_column = []\n for value in column:\n normalized_column.append(1 if value == one else 0)\n return [normalized_column]\n else: \n all_values = list(value_set)\n normalized_column = []\n\n # expand into multiple columns \n for index in range(len(all_values)):\n normalized_column.append([])\n\n for value in column:\n for index in range(len(all_values)):\n normalized_column[index].append(1 if value == all_values[index] else 0)\n \n return normalized_column", "def get_award_ids (self, column):\n raw = self[column]\n vals = map (lambda x:x.strip(), raw.split(','))\n # return list (set (vals))\n return vals\n\n if 0:\n # truncated = filter (None, map (lambda x: len(x)>5 and x[-5:] or None, vals))\n truncated = filter (None, map (lambda x: len(x)>5 and normalize_id(x) or None, vals))\n\n # we only want the unique values (e.g. crossref lists dups sometimes)\n return list (set (truncated))", "def __parseCsvRow(row):\r\n \r\n resultRow = []\r\n for item in row:\r\n if type(item) is str:\r\n if \".\" in item:\r\n try:\r\n f = float(item)\r\n resultRow.append(f)\r\n except ValueError:\r\n resultRow.append(item)\r\n else:\r\n try:\r\n i = int(item)\r\n resultRow.append(i)\r\n except ValueError:\r\n resultRow.append(item)\r\n else:\r\n resultRow.append(item)\r\n return resultRow", "def explode(self, column):\n return DataFrameDefault.register(pandas.DataFrame.explode)(self, column)", "def processRow(self, row):\n\t\tif self.delim is not None:\n\t\t\trowArr = row.split(self.delim)\n\t\t\tmsg = \"row does not have expected number of columns found \" + str(len(rowArr)) + \" expected \" + str(self.rowSize)\n\t\t\tassert len(rowArr) == self.rowSize, msg\n\t\telse:\n\t\t\trowArr = row\n\t\t\t\n\t\tnewRowArr = []\n\t\tfor i in range(len(rowArr)):\n\t\t\tcurVal = rowArr[i]\n\t\t\tif (i in self.catValues):\n\t\t\t\tvalues = self.catValues[i]\n\t\t\t\tfor val in values:\n\t\t\t\t\tif val == curVal:\n\t\t\t\t\t\tnewVal = self.trueVal\n\t\t\t\t\telse:\n\t\t\t\t\t\tnewVal = self.falseVal\n\t\t\t\t\tnewRowArr.append(newVal)\n\t\t\telse:\n\t\t\t\tnewRowArr.append(curVal)\n\t\tassert len(newRowArr) == self.newRowSize, \"invalid new row size \" + str(len(newRowArr)) + \" expected \" + str(self.newRowSize)\n\t\tencRow = self.delim.join(newRowArr) if self.delim is not None else newRowArr\n\t\treturn encRow", "def row_from_chunks(chunks):\n # Some values migh have commas in then. 
In that case we re-concatenate\n # chunks between quotes\n merging = False\n merged_value = ''\n quote = None # Record quote as '\\'' and look for this as the end quote also.\n row = []\n for chunk in chunks:\n # Important that we are not already merging, i do not restart - this is\n # an edge case actually gives an error in our data..\n if chunk.startswith('\\'') and not merging:\n merging = True\n quote = chunk[0]\n merged_value += chunk\n elif merging:\n merged_value += chunk\n else:\n row.append(chunk)\n\n # If the chunk ends with a quote, append the merged value to the row, and stop mergin\n # At this point, if merging is True, quote should not be None, if so, we would just like\n # things to blow up here\n if merging and chunk.endswith(quote):\n merging = False\n quote = None\n row.append(merged_value)\n return row", "def fix_values(df, col):\n broken_values = [value for value in df[col]]\n fixed_values = []\n for value in broken_values:\n fixed_values.append(int(value.replace(',','')\n .replace('$','')))\n df[col] = fixed_values", "def split_on_column(df, index_column, split_column, new_index_column, new_split_column, split_char=';'):\n # Produce a Series with index from index_column and values from split_column (split by split_char).\n # Each series constructor is called with (value, [split1, split2, split3]) which produces [(value, split1), (value, split2), (value, split3)]\n split = pd.concat([pd.Series(row[index_column], row[split_column].split(split_char))\n for _, row in df.iterrows()]).reset_index()\n # Add column names\n split.columns = [new_split_column, new_index_column]\n # Return reversed columns\n return split[[new_index_column, new_split_column]]", "def setcolumns(self, columns):\n\n self.__column_list = []\n for i in columns.split(\",\"):\n if str(i).strip().isdigit():\n self.__column_list.append(int(i) - 1)", "def pandas_explode(df, column_to_explode):\n\n # Create a list of new observations\n new_observations = list()\n\n # Iterate through existing observations\n for row in df.to_dict(orient='records'):\n\n # Take out the exploding iterable\n explode_values = row[column_to_explode]\n del row[column_to_explode]\n\n # Create a new observation for every entry in the exploding iterable & add all of the other columns\n for explode_value in explode_values:\n # Deep copy existing observation\n new_observation = copy.deepcopy(row)\n\n # Add one (newly flattened) value from exploding iterable\n new_observation[column_to_explode] = explode_value\n\n # Add to the list of new observations\n new_observations.append(new_observation)\n\n # Create a DataFrame\n return_df = pd.DataFrame(new_observations)\n\n # Return\n return return_df", "def read_csv(input_file):\n csv.register_dialect('my_dialect',\n delimiter = ',',\n skipinitialspace=True)\n\n csv_list_of_rows = []\n with open(input_file, 'r', encoding='utf-8', errors='replace') as csv_file:\n reader = csv.reader(csv_file, dialect='my_dialect')\n for row in reader:\n item_count = 0\n for item in row:\n if ',' in item:\n row[item_count] = '\"' + item + '\"'\n item_count += 1\n csv_list_of_rows += [row]\n return csv_list_of_rows", "def line_split(self, line):\n\t\tline = re.sub(r\"`(.*?)'\", quote_replace, line)\n\t\tline = line.translate(None, '.:,()+*')\n\t\treturn line.split()", "def split_value(string):\n split = string.split(',')\n result = []\n\n level = 0\n buf = []\n for entry in split:\n level += entry.count('(')\n level -= entry.count(')')\n\n buf.append(entry)\n if level == 0:\n result.append(','.join(buf))\n buf = []\n 
return result", "def remove_duplicates_from_string(df, *args, sep=', '):\n \n for column in args:\n if df[column].notna().any() and df[column].str.contains(sep).any():\n df[column].fillna('nan_value', inplace=True)\n df[column] = df[column].str.split(sep).apply(set).str.join(sep)\n df[column].replace({'nan_value': np.nan}, inplace=True)\n return df" ]
[ "0.64203936", "0.63070935", "0.61441594", "0.6137313", "0.5952938", "0.5780356", "0.57425", "0.5677074", "0.5605468", "0.544994", "0.5446234", "0.5424089", "0.53638244", "0.53548014", "0.53276426", "0.53026354", "0.52907306", "0.525217", "0.5158915", "0.51240396", "0.510114", "0.5099243", "0.50927806", "0.50794685", "0.50613785", "0.50587964", "0.5058082", "0.5052226", "0.50026774", "0.5000491" ]
0.66650534
0
Iterates over a dataframe and drops all rows that contain quotes as part of the string.
def drop_quote_rows(df):
    for i in df.columns.values:
        if df[i].dtype != 'datetime64[ns]' and df[i].dtype != 'float64':
            df = df[~df[i].str.contains('"', na=False)]
    return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(df):", "def strip_columns(df: DataFrame) -> DataFrame:\r\n return df.apply(lambda x: x.str.strip() if x.dtype == 'object' else x)", "def __clean_column_names(self, columns):\r\n cols = []\r\n for column in columns:\r\n cols.append(column.replace('\"', ''))\r\n return cols", "def clean_df(self, df, column_name):\r\n \r\n df[column_name] = df[column_name].fillna('').str.replace('\\n', ' ')\r\n return df", "def remove_tags(df):\n # Remove '<|startoftext|>' and '<|endoftext|>\n things_to_remove = ['>', '<', '|']\n delete_table = dict.fromkeys(map(ord, things_to_remove), '')\n\n df.text = df.text.apply(lambda x: x.translate(delete_table))\n df.text = df.text.apply(lambda x: x.replace('startoftext', '').replace('endoftext', ''))\n\n df['text'].dropna(inplace=True)\n return df", "def removeQuotes(data):\n\tfor each in data:\n\t\tfor v in each.values():\n\t\t\tif not isinstance(v, list):\n\t\t\t\t# Not implemented because not true case\n\t\t\t\tpass\n\t\ttagValueData = dict(zip(each['k'], each['v']))\n\t\tfor tag, val in tagValueData.items():\n\t\t\tif str(tag).find('\"') != -1:\n\t\t\t\t_tag = str(tag).replace('\"', '')\n\t\t\t\teach['k'][each['k'].index(tag)] = _tag\n\t\t\tif str(val).find('\"') != -1:\n\t\t\t\t_val = str(val).replace('\"', '')\n\t\t\t\teach['v'][each['v'].index(val)] = _val\n\t\tyield each", "def _clean_dataset(df: pd.DataFrame) -> pd.DataFrame:\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df.dropna()\n return df", "def sanitize_diagnoses(df):\n df = df.str.replace(\"\\W\", \"\") # \"\\W\" regex represents ANY non-alphanumeric character\n# assert (df.str.contains(\"\\W\")).any(), \"At least one diagnosis has a non-alphanumeric character in it\"\n return df", "def data_cleaning(df):\n df = df.dropna(how='all')\n \n return df", "def trim_all_columns(df):\n trim_strings = lambda x: x.strip() if isinstance(x, str) else x\n return df.applymap(trim_strings)", "def strip_sql(data, sap_stat=True):\r\n tab_fixs = [[\"PCOGIS.SDE.\", ''],\r\n [\"Auxiliary Equipment\", \"AUXILLARYEQUIPMENT\"]]\r\n for old_str, new_str in tab_fixs:\r\n data['TABLE'] = data['TABLE'].str.replace(old_str, new_str)\r\n data = data.dropna(subset=['COLUMN'])\r\n bad_atts = [\" \", \"SHAPE_Length\", \"HVFUSES\", \"LVFUSES\", \"SHAPE_Area\",\r\n \"None\", \"ACTUALLENGTH\", \"DECOMMISSIONINGDATE\",\r\n \"DECOMMISSIONINGREASON\", 'LOTS YET TO ADD']\r\n data = data[~data['COLUMN'].isin(bad_atts)]\r\n bad_tabs = ['LocationAttributes', 'CustomerConnections', 'TBD']\r\n data = data[~data['TABLE'].isin(bad_tabs)]\r\n bad_tab_atts = [['SWITCHUNIT$', 'INTERRUPTINGMEDIUM$'],\r\n ['DistributionMain$', 'CROSSINGID$'],\r\n ['DistributionMain$', 'MOUNTINGTYPE$'],\r\n ['DistributionMain$', 'MOUNTINGPOSITION$']]\r\n for tab_str, att_str in bad_tab_atts:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str))]\r\n bad_doubles = [['Regulator$', 'SUBTYPECD$', 'y'],\r\n ['RegulatorStation$', 'EQUIPMENTID$', 'N'],\r\n ['SurfaceStructure$', 'APPLICATION$', 'N'],\r\n ['SurfaceStructure$', 'ENTRY$', 'N'],\r\n ['SurfaceStructure$', 'FACILITYID$', 'N'],\r\n ['SurfaceStructure$', 'MANUFACTURER$', 'N'],\r\n ['SurfaceStructure$', 'MATERIAL$', 'N'],\r\n ['SurfaceStructure$', 'MODEL$', 'N'],\r\n ['SurfaceStructure$', 'STRUCTURESIZE$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'BATTERYAMPERAGEHOURS$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'BATTERYCOUNT$', 'N'],\r\n ['PillarPoint$', 'DATEMANUFACTURED$', 'TBC'],\r\n ['PillarPoint$', 'FACILITYID$', 'TBC'],\r\n ['PillarPoint$', 'FEEDERID$', 
'TBC'],\r\n ['PillarPoint$', 'NUMBEROFUSEDCIRCUITS$', 'TBC'],\r\n ['PillarPoint$', 'SUBTYPECD$', 'N'],\r\n ['PillarPoint$', 'TOTALNUMBEROFCIRCUITS$', 'TBC'],\r\n ['PillarPoint$', 'TRUENZMGPOS$', 'N'],\r\n ['SupportStructure$', 'HIGHESTVOLTAGE$', 'N'],\r\n ['SurfaceStructure$', 'ASSETFUNCTION$', 'N'],\r\n ['SurfaceStructure$', 'ENCLOSUREMANUFACTURER$', 'N'],\r\n ['SurfaceStructure$', 'ENCLOSURETYPE$', 'N'],\r\n ['SurfaceStructure$', 'GLOBALID$', 'N'],\r\n ['SurfaceStructure$', 'STREETNAME$', 'N'],\r\n ['SurfaceStructure$', 'STREETNO$', 'N'],\r\n ['SurfaceStructure$', 'SUBURB$', 'N'],\r\n ['SurfaceStructure$', 'SYMBOLROTATION$', 'N'],\r\n ['SurfaceStructure$', 'TOWN$', 'N'],\r\n ['Switch$', 'FACILITYID$', 'N'],\r\n ['Switch$', 'FEEDERID$', 'N'],\r\n ['Switch$', 'FEEDERID2$', 'N'],\r\n ['Switch$', 'GEONETFEEDERCODE$', 'N'],\r\n ['Switch$', 'GLOBALID$', 'N'],\r\n ['Switch$', 'GROUNDEDINDICATOR$', 'N'],\r\n ['Switch$', 'INSTALLATIONDATE$', 'N'],\r\n ['Switch$', 'MOUNTING$', 'N'],\r\n ['Switch$', 'NORMALPOSITION$', 'N'],\r\n ['Switch$', 'NUMPHASES$', 'N'],\r\n ['Switch$', 'OPERATINGVOLTAGE$', 'N'],\r\n ['Switch$', 'OUTOFORDERINDICATOR$', 'N'],\r\n ['Switch$', 'REFERENCE$', 'N'],\r\n ['Switch$', 'REMOTECONTROLLED$', 'N'],\r\n ['Switch$', 'REMOTEINDICATION$', 'N'],\r\n ['Switch$', 'RETICULATION$', 'N'],\r\n ['Switch$', 'SITEID$', 'N'],\r\n ['Switch$', 'STREETNAME$', 'N'],\r\n ['Switch$', 'STREETNO$', 'N'],\r\n ['Switch$', 'SUBTYPECD$', 'N'],\r\n ['Switch$', 'SUBURB$', 'N'],\r\n ['Switch$', 'SYMBOLROTATION$', 'N'],\r\n ['Switch$', 'TOWN$', 'N'],\r\n ['Switch$', 'WORKORDERID$', 'N'],\r\n ['SWITCHUNIT$', 'ARCQUENCHING$', 'N'],\r\n ['SWITCHUNIT$', 'C_INTJDEID$', 'N'],\r\n ['SWITCHUNIT$', 'COMMENTS$', 'N'],\r\n ['SWITCHUNIT$', 'DATEMANUFACTURED$', 'N'],\r\n ['SWITCHUNIT$', 'DATEPURCHASED$', 'N'],\r\n ['SWITCHUNIT$', 'INSTALLATIONDATE$', 'N'],\r\n ['SWITCHUNIT$', 'INSULATIONMEDIUM$', 'N'],\r\n ['SWITCHUNIT$', 'LOADBREAKINGCAPACITY$', 'N'],\r\n ['SWITCHUNIT$', 'MANUFACTURER$', 'N'],\r\n ['SWITCHUNIT$', 'MODEL$', 'N'],\r\n ['SWITCHUNIT$', 'NORMALCURRENTRATING$', 'N'],\r\n ['SWITCHUNIT$', 'NUMPHASES$', 'N'],\r\n ['SWITCHUNIT$', 'OWNER$', 'N'],\r\n ['SWITCHUNIT$', 'REFERENCE$', 'N'],\r\n ['SWITCHUNIT$', 'SERIALNUMBER$', 'N'],\r\n ['SWITCHUNIT$', 'VISUALEARTHINDICATOR$', 'N'],\r\n ['SWITCHUNIT$', 'VOLTAGERATING$', 'N'],\r\n ['SWITCHUNIT$', 'WORKORDERID$', 'N'],\r\n ['UndergroundStructure$', 'C_INTJDEID$', 'N'],\r\n ['UndergroundStructure$', 'COMMENTS$', 'N'],\r\n ['UndergroundStructure$', 'FACILITYID$', 'N'],\r\n ['UndergroundStructure$', 'FEEDERID$', 'N'],\r\n ['UndergroundStructure$', 'GLOBALID$', 'N'],\r\n ['UndergroundStructure$', 'HIGHESTVOLTAGE$', 'N'],\r\n ['UndergroundStructure$', 'INSTALLATIONDATE$', 'N'],\r\n ['UndergroundStructure$', 'OUTOFORDERINDICATOR$', 'N'],\r\n ['UndergroundStructure$', 'OWNER$', 'N'],\r\n ['UndergroundStructure$', 'REFERENCE$', 'N'],\r\n ['UndergroundStructure$', 'STREETNAME$', 'N'],\r\n ['UndergroundStructure$', 'STREETNO$', 'N'],\r\n ['UndergroundStructure$', 'SUBURB$', 'N'],\r\n ['UndergroundStructure$', 'SYMBOLROTATION$', 'N'],\r\n ['UndergroundStructure$', 'TOWN$', 'N'],\r\n ['UndergroundStructure$', 'WORKORDERID$', 'N'],\r\n ['Fuse$', 'INSTALLATIONDATE$', 'N'],\r\n ['Ground$', 'BELOWGROUNDCONNECTION$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'COOLINGTYPE$', 'TBD'],\r\n ['POWERTRANSFORMERUNIT$', 'COOLINGTYPE2$', 'TBD'],\r\n ['POWERTRANSFORMERUNIT$', 'COOLINGTYPE3$', 'TBD'],\r\n ['POWERTRANSFORMERUNIT$', 'CTBURDENVA$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'CTCLASS$', 
'N'],\r\n ['POWERTRANSFORMERUNIT$', 'CTQUANTITY$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'CTRATIO$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'IMPEDANCE2$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'IMPEDANCE3$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'IMPEDANCEZ0$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'RATEDMVA$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'RATEDMVA2$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'RATEDMVA3$', 'N'],\r\n ['AUXILLARYEQUIPMENT$', 'MANUFACTURER$', 'N'],\r\n ['AUXILLARYEQUIPMENT$', 'MODEL$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'BATTERYTYPE$', 'N'],\r\n ['SupportStructure$', 'FUNCTION_$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'GENERATORFUELTYPE$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'HOURSOFSUPPLY$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'PARALELLCOUNT$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'PARALELLCOUNT$', 'TBD'],\r\n ['COMMSPOWERSUPPLY$', 'SYSTEMVOLTAGE$', 'TBD'],\r\n ['SurfaceStructure$', 'TRUENZMGPOS$', 'N'],\r\n ['SupportStructure$', 'ABSOLUTE$', 'N'],\r\n ['DISTTRANSFUSEUNIT$', 'VOLTAGERATING$', 'N'],\r\n ['DISTTRANSFUSEUNIT$', 'WORKORDERID$', 'N'],\r\n ['SupportStructure$', 'FEEDERID$', 'TBC'],\r\n ['SupportStructure$', 'SHAPE$', ' N'],\r\n ['SupportStructure$', 'SUBTYPECD$', 'TBD'],\r\n ['SupportStructure$', 'TREATMENTTYPE$', 'N'],\r\n ['SupportStructure$', 'TRUENZMG$', 'N'],\r\n ['SupportStructure$', 'TYPEOFTOP$', 'N'],\r\n ['SupportStructure$', 'USAGETYPE$', 'N']]\r\n if sap_stat is True:\r\n for tab_str, att_str, sap_str in bad_doubles:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str) &\r\n data['SAP'].str.match(sap_str))]\r\n bad_null = [['SurfaceStructure$', 'ENCLOSURE$'],\r\n ['SurfaceStructure$', 'ENCLOSUREMANUFACTURER$'],\r\n ['SurfaceStructure$', 'ENCLOSURETYPE$'],\r\n ['Fuse$', 'ACCURACY$'],\r\n ['Fuse$', 'ANCILLARYROLE$'],\r\n ['Fuse$', 'ASSETFUNCTION$'],\r\n ['Fuse$', 'C_INTJDEID$'],\r\n ['Fuse$', 'COMMENTS$'],\r\n ['Fuse$', 'CREATIONUSER$'],\r\n ['Fuse$', 'DATECREATED$'],\r\n ['Fuse$', 'DATEMODIFIED$'],\r\n ['Fuse$', 'DEVICETYPE$'],\r\n ['Fuse$', 'ELECTRICTRACEWEIGHT$'],\r\n ['Fuse$', 'ENABLED$'],\r\n ['Fuse$', 'FACILITYID$'],\r\n ['Fuse$', 'FEEDERID$'],\r\n ['Fuse$', 'FEEDERID2$'],\r\n ['Fuse$', 'FEEDERINFO$'],\r\n ['Fuse$', 'GEONETFEEDERCODE$'],\r\n ['Fuse$', 'GEONETFEEDERID$'],\r\n ['Fuse$', 'GEONETSUBSTATION$'],\r\n ['Fuse$', 'GLOBALID$'],\r\n ['Fuse$', 'INSTALLEDBY$'],\r\n ['Fuse$', 'LABELTEXT$'],\r\n ['Fuse$', 'LASTUSER$'],\r\n ['Fuse$', 'MANUFACTURER$'],\r\n ['Fuse$', 'MAXCONTINUOUSCURRENT$'],\r\n ['Fuse$', 'MAXINTERRUPTINGCURRENT$'],\r\n ['Fuse$', 'MAXOPERATINGVOLTAGE$'],\r\n ['Fuse$', 'MOUNTING$'],\r\n ['Fuse$', 'NOMINALVOLTAGE$'],\r\n ['Fuse$', 'NORMALPOSITION$'],\r\n ['Fuse$', 'NUMPHASES$'],\r\n ['Fuse$', 'OBJECTID$'],\r\n ['Fuse$', 'OPERATINGVOLTAGE$'],\r\n ['Fuse$', 'OUTOFORDERINDICATOR$'],\r\n ['Fuse$', 'OWNER$'],\r\n ['Fuse$', 'PARENTID$'],\r\n ['Fuse$', 'PHASEDESIGNATION$'],\r\n ['Fuse$', 'PREMISE$'],\r\n ['Fuse$', 'PRESENTPOSITION$'],\r\n ['Fuse$', 'RDB_UFID$'],\r\n ['Fuse$', 'REFERENCE$'],\r\n ['Fuse$', 'REMOTECONTROLLED$'],\r\n ['Fuse$', 'REMOTEINDICATION$'],\r\n ['Fuse$', 'RETICULATION$'],\r\n ['Fuse$', 'SCADACONTROLMECHANISM$'],\r\n ['Fuse$', 'SCADACONTROLTYPE$'],\r\n ['Fuse$', 'SCADAPTID$'],\r\n ['Fuse$', 'SHAPE$'],\r\n ['Fuse$', 'SITEID$'],\r\n ['Fuse$', 'STREETNAME$'],\r\n ['Fuse$', 'STREETNO$'],\r\n ['Fuse$', 'SUBTYPECD$'],\r\n ['Fuse$', 'SUBURB$'],\r\n ['Fuse$', 'SYMBOLROTATION$'],\r\n ['Fuse$', 'TIMESTAMP$'],\r\n ['Fuse$', 'TOWN$'],\r\n ['Fuse$', 'TYPE$'],\r\n ['Fuse$', 'WORKORDERID$'],\r\n ['Fuse$', 'ZONE$']]\r\n for 
tab_str, att_str in bad_null:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str) &\r\n data['SAP'].isnull())]\r\n return data", "def remove_col(df):\n df[\"bag_of_words\"] = \"\"\n columns = df.columns\n for index, row in df.iterrows():\n words = \"\"\n for col in columns:\n if col != \"Director\":\n words = words + \" \".join(row[col]) + \" \"\n else:\n words = words + row[col] + \" \"\n row[\"bag_of_words\"] = words\n df.drop(columns=[col for col in df.columns if col != \"bag_of_words\"], inplace=True)\n return df", "def destringify_df(df: pd.DataFrame, separator=\"\\t\"):\n df = split_stuff(df, separator=separator).T\n df = dataframe_stuff(df)\n return df", "def trim_text_for_dataframe(self, data: pd.DataFrame) -> pd.DataFrame:\n for column_name in data:\n data[column_name] = self.trim_text_for_column(\n column_data=data[column_name],\n threshold=self.config[f'{column_name}_upper_length_limit'],\n trim_utils=self.trim_utils,\n )\n return data", "def strip_whitespace(df, column=None):\n\n if column is None:\n for x in df.columns:\n if df[x].dtypes == object:\n df[x] = pd.core.strings.str_strip(df[x])\n df[x] = df[x].str.replace('\\n', '')\n df[x] = df[x].str.replace(r'\\r', ' ', regex=True)\n df[x] = df[x].str.replace(r'\\n', ' ', regex=True)\n df[x] = df[x].str.replace(r'\\v', ' ', regex=True)\n elif isinstance(column, list):\n for x in column:\n if df[x].dtypes == object:\n df[x] = pd.core.strings.str_strip(df[x])\n df[x] = df[x].str.replace(r'\\r', ' ', regex=True)\n df[x] = df[x].str.replace(r'\\n', ' ', regex=True)\n df[x] = df[x].str.replace(r'\\v', ' ', regex=True)\n\n else:\n if df[column].dtypes == object:\n df[column] = pd.core.strings.str_strip(df[column])\n df[column] = df[column].str.replace(r'\\r', ' ', regex=True)\n df[column] = df[column].str.replace(r'\\n', ' ', regex=True)\n df[column] = df[column].str.replace(r'\\v', ' ', regex=True)\n\n return df", "def remove_duplicates_from_string(df, *args, sep=', '):\n \n for column in args:\n if df[column].notna().any() and df[column].str.contains(sep).any():\n df[column].fillna('nan_value', inplace=True)\n df[column] = df[column].str.split(sep).apply(set).str.join(sep)\n df[column].replace({'nan_value': np.nan}, inplace=True)\n return df", "def parse_table_to_madx_remove_str(name: str, df: pd.DataFrame) -> str:\n # start sequence edit\n text = \"USE, SEQUENCE={};\\n\".format(name)\n text += \"SEQEDIT, SEQUENCE = {}; \\nFLATTEN;\\n\".format(name)\n for _, row in df.iterrows():\n line = \"REMOVE, ELEMENT = {:16};\\n\".format(row[\"name\"])\n text += line\n\n # end sequence edit\n text += \"FLATTEN;\\nENDEDIT;\"\n\n return text", "def clean_lines(df_column):\n \n clean_lines = []\n # pattern for html tags\n tag_match = re.compile('<.*?>')\n # patterm for website\n website_match = re.compile('https?:\\/\\/.*[\\r\\n]*')\n # pattern for tex\n tex_match = re.compile('\\$\\$?.+?\\$\\$?')\n \n for line in df_column:\n s = re.sub(tag_match, '', line)\n s = re.sub(website_match, '[website]', s)\n s = re.sub(tex_match, '[tex]', s)\n # replace extra whitespace with spaces\n for x in string.whitespace:\n s = s.replace(x, ' ')\n clean_lines.append(s)\n \n return clean_lines", "def old_strip_sql(data, sap_stat=True):\r\n tab_fixs = [[\"PCOGIS.SDE.\", ''],\r\n [\"Auxiliary Equipment\", \"AUXILLARYEQUIPMENT\"]]\r\n for old_str, new_str in tab_fixs:\r\n data['TABLE'] = data['TABLE'].str.replace(old_str, new_str)\r\n data = data.dropna(subset=['COLUMN'])\r\n bad_atts = [\" \", \"SHAPE_Length\", \"HVFUSES\", 
\"LVFUSES\", \"SHAPE_Area\",\r\n \"ACTUALLENGTH\", \"DECOMMISSIONINGDATE\", \"DECOMMISSIONINGREASON\"]\r\n data = data[~data['COLUMN'].isin(bad_atts)]\r\n bad_tab_atts = [['SWITCHUNIT$', 'INTERRUPTINGMEDIUM$'],\r\n ['DistributionMain$', '^CROSSINGID$'],\r\n ['DistributionMain$', '^MOUNTINGTYPE$'],\r\n ['DistributionMain$', '^MOUNTINGPOSITION$']]\r\n for tab_str, att_str in bad_tab_atts:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str))]\r\n bad_doubles = [['Regulator$', '^SUBTYPECD$', 'y'],\r\n ['RegulatorStation$', '^EQUIPMENTID$', 'N'],\r\n ['SurfaceStructure$', '^APPLICATION$', 'N'],\r\n ['SurfaceStructure$', '^ENTRY$', 'N'],\r\n ['SurfaceStructure$', '^FACILITYID$', 'N'],\r\n ['SurfaceStructure$', '^MANUFACTURER$', 'N'],\r\n ['SurfaceStructure$', '^MATERIAL$', 'N'],\r\n ['SurfaceStructure$', '^MODEL$', 'N'],\r\n ['SurfaceStructure$', '^STRUCTURESIZE$', 'N'],\r\n ['COMMSPOWERSUPPLY$', '^BATTERYAMPERAGEHOURS$', 'N'],\r\n ['COMMSPOWERSUPPLY$', '^BATTERYCOUNT$', 'N']]\r\n if sap_stat is True:\r\n for tab_str, att_str, sap_str in bad_doubles:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str) &\r\n data['SAP'].str.match(sap_str))]\r\n bad_null = [['SurfaceStructure$', '^ENCLOSURE$'],\r\n ['SurfaceStructure$', '^ENCLOSURETYPE$'],\r\n ['SurfaceStructure$', '^ENCLOSUREMANUFACTURER$']]\r\n for tab_str, att_str in bad_null:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str) &\r\n data['SAP'].isnull())]\r\n return data", "def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]", "def df_cleaner(df):\n df = df.dropna()\n return df", "def check_if_quotations(string):\n quote_found_double = False\n quote_found_single = False\n for i in range(len(string)):\n if string[i] == '\"':\n quote_found_double = True\n if string[i] == \"'\":\n quote_found_single = True\n if quote_found_double == True:\n string = remove(string, '\"')\n if quote_found_single == True:\n string = remove(string, \"'\")\n return string", "def remove_empty_rows(dataframe: pd.DataFrame, column_name: str):\n original_size = len(dataframe)\n dataframe[column_name].replace(\" \", np.nan, inplace=True)\n dataframe[column_name].replace(\"\", np.nan, inplace=True)\n dataframe.dropna(subset=[column_name], inplace=True)\n dataframe.reset_index(drop=True, inplace=True)\n new_size = len(dataframe)\n print(f\"A total of {original_size - new_size} rows were dropped\")", "def df_cleaner(df):\n return df.dropna()", "def remove_non_numeric(df, column='Phone'):\n\n if isinstance(column, list):\n for x in column:\n if x in df.columns.values:\n df[x] = df[x].replace(r'[^0-9]', '', regex=True)\n return df\n else:\n df[column] = df[column].replace(r'[^0-9]', '', regex=True)\n return df", "def clean_data(df):\n cleaned = []\n for row,i in zip(df['text'],df.index):\n # if ':' in row:\n # row = row.split(':')[1]\n text = re.sub('https:[\\w.\\/]*','',row)\n # a = re.sub(r'[\\.@]', '', row)\n cleaned.append(text)\n df['clean_text'] = pd.Series(cleaned)\n return df", "def dataCleaner(dataframe):\r\n dataframe = dataframe.dropna(how='all')\r\n for col in dataframe:\r\n dataframe[col] = dataframe[col].apply(lambda x : np.nan() if str(x).isspace() else x)\r\n dataframe[col] = 
dataframe[col].fillna(dataframe[col].mean())\r\n return dataframe", "def removeEscapingDoubleQuoteInSQLString(string, forceDoubleQuote=True):\n if string is None:\n return string\n\n string = string.replace('\"\"', '\"')\n\n if forceDoubleQuote:\n string = '\"' + string + '\"'\n return string", "def strip_quotes(text: str) -> str:\n\t\tpieces = [\n\t\t\t\t('`', '`'),\n\t\t\t\t(Chars.lsq, Chars.rsq), (Chars.ldq, Chars.rdq), (\"'\", \"'\"), ('\"', '\"')\n\t\t\t]\n\t\treturn StringTools.strip_paired(text, pieces)", "def clean_rows(reader):\n return [[a.strip() for a in row] for row in reader if row]" ]
[ "0.626898", "0.60871345", "0.6035571", "0.5847739", "0.58360153", "0.58146024", "0.5808672", "0.5807812", "0.57319236", "0.57052547", "0.5661681", "0.56058556", "0.55961424", "0.557724", "0.5502874", "0.54684365", "0.5449995", "0.5445038", "0.5423044", "0.54162115", "0.5396272", "0.5394988", "0.5357674", "0.53282315", "0.53258246", "0.53236103", "0.5317339", "0.53143156", "0.5307293", "0.5303716" ]
0.8157656
0
Chooses the date closest to the given date in a given pandas series in the given direction.
def closest_date(series, date=pd.to_datetime('today'), period='future'):
    x = series.copy()
    x = x.append(pd.Series(date, index=[len(x.index)]))
    x = x.ix[pd.to_datetime(x).sort_values().index]
    x = x.reset_index(drop=True)
    index_today = x[x == date].head(1)
    if period == 'future':
        if x.tail(1).values == index_today.values:
            return closest_date(series, date=date, period='past')
        closest_date_in_future = x[int(index_today.index.values) + 1]
        return closest_date_in_future
    elif period == 'past':
        closest_date_in_past = x[int(index_today.index.values) - 1]
        return closest_date_in_past
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closest_row(dataframe, column, value):\n sort = dataframe.iloc[(dataframe[column]-value).abs().argsort()[:1]]\n return sort", "def next_determination_date(ddates,clo_idx):\n dd = ddates.loc[ddates['Fund']==clo_idx,'Determination Date']\n next_ddate = min(dd.loc[dd>pd.Timestamp.today()], key=lambda s: (s-pd.Timestamp.today()))\n return next_ddate", "def closest_point(point, points):\n return points[cdist([point], points).argmin()]", "def locate_nearest_event(self):\n nearest_event_date = ''\n min = 1000000\n today = self.get_today()\n event_array = self.events.keys()\n for event_date in event_array:\n event_date = self.date_to_operate_format(event_date)\n if int(event_date) - int(today) > 0:\n if int(event_date) - int(today) < min:\n min = int(event_date) - int(today)\n nearest_event_date = event_date\n\n nearest_event = '0'\n if len(event_array) > 0:\n nearest_event = self.change_format_to_database_index(nearest_event_date)\n\n return nearest_event", "def get_nearest_payday(\n date: date_class,\n frequency: timedelta,\n holidays: list,\n given_payday: date_class,\n default_payday: WeekNamePlaceholder) -> date_class:\n _payday = given_payday\n \n if date < given_payday: # Go Backward\n # change frequency to negative\n frequency = timedelta(days=-1*frequency.days)\n while (_payday > date):\n _payday = update_payday(\n payday=_payday,\n frequency=frequency,\n holidays=holidays,\n default_payday=default_payday\n )\n else: # Go Forward\n while (_payday < date):\n _payday = update_payday(\n payday=_payday,\n frequency=frequency,\n holidays=holidays,\n default_payday=default_payday\n )\n\n return _payday", "def _closest_date(target_dt, date_list, before_target=None) -> datetime.date | None:\n\n def time_before(d):\n return target_dt - d if d <= target_dt else datetime.timedelta.max\n\n def time_after(d):\n return d - target_dt if d >= target_dt else datetime.timedelta.max\n\n def any_time(d):\n return target_dt - d if d < target_dt else d - target_dt\n\n if before_target is None:\n return min(date_list, key=any_time).date()\n if before_target:\n return min(date_list, key=time_before).date()\n else:\n return min(date_list, key=time_after).date()", "def find_first_value(self, value, closest=False):\n value = pd.to_datetime(value)\n value = column.as_column(value).as_numerical[0]\n return self.as_numerical.find_first_value(value, closest=closest)", "def closest_point(self, point, start_param=None, Ns=25):\n x, z = self.rotate_to_xz_plane(point)\n la = self._closest_point(x, z, start_param, Ns)\n return la", "def nearest(df:pd.DataFrame,\n year:int,\n vector_col:str,\n level:str='label') -> pd.Series:\n # this methods obtains the quotation closest in time for each sense of a lemma. 
\n # get idx of quotations nearest in time for each sense\n \n df['temp_dist'] = abs(df.year - year)\n quots_nn_time_idx = df.groupby(level)['temp_dist'].idxmin().values\n \n # get the quotations and the sense idx\n return df.loc[quots_nn_time_idx][[level,vector_col]].set_index(level,inplace=False)[vector_col]", "def get_closest_due_date(self, due_dates: List[DueDate], current_day=None) -> DueDate:\n if current_day is None:\n current_day = Today()\n\n diff_list = []\n # calculate the difference between current day and date string\n for due_date in due_dates:\n if len(due_date.date_string) > 0:\n day = Day(datetime.strptime(due_date.date_string, self.vars.date_format))\n timedelta1 = day.to_date_time() - current_day.to_date_time()\n diff_list.append(timedelta1.days)\n\n # return the date string using the smallest difference\n for index, diff_num in enumerate(diff_list):\n if diff_num >= 0:\n return due_dates[index]", "def find_next_valid(data, date):\n correct_date = None\n while correct_date is None:\n try:\n _ = data.loc[date]\n correct_date = date\n except KeyError:\n date = add_time(date, day=1)\n return correct_date", "def closest(point, points):\n pts = [(Point.distance(point, p), p) for p in points]\n pts.sort()\n return pts[0][1]", "def next_payment_date(ddates,clo_idx):\n dd = ddates.loc[ddates['Fund']==clo_idx,'Payment Date'].dropna()\n next_date = min(dd.loc[dd>pd.Timestamp.today()], key=lambda s: (s-pd.Timestamp.today()))\n return next_date", "def FindClosestPoint(self, ):\n ...", "def prior_determination_date(ddates,clo_idx):\n dd = ddates.loc[ddates['Fund']==clo_idx,'Determination Date']\n prior_ddate = max(dd.loc[dd<pd.Timestamp.today()], key=lambda s: (s-pd.Timestamp.today()))\n return prior_ddate", "def _next_trading_day(self, day):\n next_day = self._trading_days.shift(-1)[day]\n return next_day if not pd.isnull(next_day) else None", "def fcl(df, dtObj):\r\n return df.iloc[np.argmin(np.abs(pd.to_datetime(df.index) - dtObj))] # remove to_pydatetime()\r", "def next_release_date(date):\n df = get_release_dates()\n df = df[df['ReleaseDate'] > date]\n return df['ReleaseDate'].iloc[0]", "def get_price_on_or_before_date(date, prices):\n for i in range(6):\n current_date = date - timedelta(days=i)\n if current_date in prices:\n return float(prices[current_date]), i\n return (None, None)", "def _nearest_to_point(self, point):\n ptvertex = point.get_vertex(crs=self.crs)\n segments = zip(self.vertices.slice(0, -1), self.vertices.slice(1, 0))\n\n if isinstance(self.crs, CartesianCRS):\n func = _cvectorgeo.pt_nearest_planar\n def func(seg):\n return _cvectorgeo.pt_nearest_planar(ptvertex[0], ptvertex[1],\n seg[0][0], seg[0][1], seg[1][0], seg[1][1])\n else:\n fwd = self.crs.forward\n inv = self.crs.inverse\n def func(seg):\n return _cvectorgeo.pt_nearest_proj(fwd, inv, ptvertex,\n seg[0], seg[1], tol=0.01)\n\n point_dist = map(func, segments)\n min_point = None\n min_dist = -1.0\n for i, (point, dist) in enumerate(point_dist):\n if dist < min_dist or (i == 0):\n min_point = point\n min_dist = dist\n\n return min_dist, min_point", "def find_closest_point(point, street, streetvolume):\r\n streetdf = streetvolume[streetvolume['streetname'] == street]\r\n if streetdf.shape[0] == 0:\r\n streetdf = streetvolume\r\n streetdf['pdistance'] = streetdf['geometry'].apply(lambda x: point.distance(x))\r\n streetdf.sort_values(by = 'pdistance', ascending = True, inplace = True)\r\n return streetdf['lineid'].iloc[0]", "def closest_ds_partition(\n table, ds, before=True, schema=\"default\", 
metastore_conn_id=\"metastore_default\"\n) -> str | None:\n from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook\n\n if \".\" in table:\n schema, table = table.split(\".\")\n hive_hook = HiveMetastoreHook(metastore_conn_id=metastore_conn_id)\n partitions = hive_hook.get_partitions(schema=schema, table_name=table)\n if not partitions:\n return None\n part_vals = [next(iter(p.values())) for p in partitions]\n if ds in part_vals:\n return ds\n else:\n parts = [datetime.datetime.strptime(pv, \"%Y-%m-%d\") for pv in part_vals]\n target_dt = datetime.datetime.strptime(ds, \"%Y-%m-%d\")\n closest_ds = _closest_date(target_dt, parts, before_target=before)\n if closest_ds is not None:\n return closest_ds.isoformat()\n return None", "def leftshift_series(series):\n leftshifted_x = [(series.index[idx] - series.index[0]).days for idx in range(len(series.index))]\n return pd.Series(data=series.values, index=leftshifted_x)", "def checkValidDate(self, nextDate, startDate):\n\t\tcurrentDay = nextDate.day\n\t\tcurrentMonth = nextDate.month\n\t\tcurrentYear = nextDate.year\n\t\t\n\t\tstartDay = startDate.day\n\t\tcurrentDate = datetime.datetime(currentYear, currentMonth, startDay)\n\t\t\n\t\tminDaysDiff = 99\n\t\tresult = None\n\t\t\n\t\tfor dataPoint in self.dataPoints:\n\t\t\tdataPointDate = dataPoint.getDate()\n\t\t\tdaysDiff = (dataPointDate - currentDate).days\n\t\t\t\t\t\t\n\t\t\tif daysDiff == 0:\n\t\t\t\treturn dataPointDate\n\t\t\telif daysDiff >= 0 and daysDiff < minDaysDiff:\n\t\t\t\tminDaysDiff = daysDiff\n\t\t\t\tresult = dataPointDate\n\t\treturn result", "def get_closest(a, n):\n pos = bisect_left(a, n)\n if pos == 0:\n return a[0]\n if pos == len(a):\n return a[-1]\n before = a[pos - 1]\n after = a[pos]\n if after - n < n - before:\n return after\n else:\n return before", "def test_second_date_lower(self):\n input_ = (datetime.date(2015, 10, 24), datetime.date(2014, 12, 12))\n with self.assertRaises(ValueError):\n self.expander._get_next_days(*input_)", "def compute_open_lead(df, shift=-1):\n open_lead = df.groupby(\"store\")[\"open\"].shift(shift)\n return open_lead.combine_first(df[\"day_of_week\"].ne(6).astype(\"double\"))", "def closest_point(\n self, points: Union[List[\"PointMixin\"], \"PointMixin\"]\n ) -> pd.Series:\n from ..core.distance import closest_point as cp\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n if not isinstance(points, list):\n points = [points]\n\n return min(\n (cp(self.data, point) for point in points),\n key=attrgetter(\"distance\"),\n )", "def _get_next_order_date(self, line, start_date):\n self.ensure_one()\n next_date = fields.Date.from_string(self.start_date)\n while next_date <= start_date:\n next_date = self.__get_next_term_date(\n next_date, line.ordering_unit, line.ordering_interval)\n return next_date", "def closest(self, x, y):\n if self.direction == 'horizontal':\n p_pts = np.array([\n self.ax.transData.transform((p, 0))[0] for p in self.positions\n ])\n dist = abs(p_pts - x)\n else:\n p_pts = np.array([\n self.ax.transData.transform((0, p))[1] for p in self.positions\n ])\n dist = abs(p_pts - y)\n index = np.argmin(dist)\n return index, dist[index]" ]
[ "0.5489094", "0.53745514", "0.5371786", "0.5279156", "0.517958", "0.5167142", "0.5166287", "0.51227057", "0.5106809", "0.51014584", "0.50864875", "0.5069313", "0.50628966", "0.5034019", "0.495855", "0.4945508", "0.49299702", "0.49279854", "0.4911208", "0.48947218", "0.4872206", "0.48322126", "0.4831165", "0.4828562", "0.48205146", "0.48105898", "0.48012224", "0.47486165", "0.473879", "0.4733078" ]
0.7016062
0
Function called for every view, before Django chooses which view will be called. The function asks the user's browser for a Negotiate token.
def process_request(self, request, *args, **kwargs):
    if not settings.GSSAPI_ENABLED_OPTION:
        return None
    unauthorized = False
    if 'HTTP_AUTHORIZATION' in request.META:
        kind, initial_client_token = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
        if kind != 'Negotiate':
            unauthorized = True
    else:
        unauthorized = True
    if unauthorized:
        response = HttpResponse(request, status=401)
        response['WWW-Authenticate'] = 'Negotiate'
        return response
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lesson_auth(request):", "def login_required(view):\n def check_login(request,*args,**kwds):\n if not request.user.is_authenticated():\n api = FlattrAPI(secrets.FLATTR_API_KEY,secrets.FLATTR_API_SECRET)\n callback = reverse(oauth_callback)\n callback += \"?next=\" + urllib.quote(request.get_full_path(),\"\")\n callback = request.build_absolute_uri(callback)\n (token,url) = api.request_access_token(callback,\"click\")\n print \"OBTAINING REQ TOKEN\", token.key, token.secret\n t = APIToken.objects.create(id=token.key,secret=token.secret)\n t.save()\n print \"REDIRECTING TO\", url\n return HttpResponseRedirect(url)\n return view(request,*args,**kwds)\n return check_login", "def login_required(view):\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if APP_KEY_ACCESS_TOKEN not in request.cookies:\n return redirect(GM_OAUTH_URL)\n return view(**kwargs)\n return wrapped_view", "def login_web_required(view_func):\r\n @wraps(view_func, assigned=available_attrs(view_func))\r\n def _wrapped_view_func(request, *args, **kwargs):\r\n if hasattr(request, \"session\") and request.session.get('is_logon', False) and request.user.is_active:\r\n return view_func(request, *args, **kwargs)\r\n else:\r\n return HttpResponse(FailResponse(u'请先登录'))\r\n return _wrapped_view_func", "async def token(request: Request):\n return get_token()", "def i_am_in_the_login_page(browser):", "def start_auth(request):\n # create the client to Indivo\n client = get_indivo_client(request, with_session_token=False)\n \n # do we have a record_id?\n record_id = request.GET.get('record_id', None)\n carenet_id = request.GET.get('carenet_id', None)\n \n # prepare request token parameters\n params = {'oauth_callback':'oob'}\n if record_id:\n params['indivo_record_id'] = record_id\n if carenet_id:\n params['indivo_carenet_id'] = carenet_id\n\n # request a request token\n req_token = client.fetch_request_token(params)\n\n # store the request token in the session for when we return from auth\n request.session['request_token'] = req_token\n \n # redirect to the UI server\n return HttpResponseRedirect(client.auth_redirect_url)", "def auth_token(self):", "def login_require(request):\n\n if request.method == \"GET\":\n data = request.GET\n else:\n data = request.POST\n user = authenticate(username=data[\"username\"], password=data[\"password\"])\n if user and user.is_active:\n ret = Response(SUCCESS, error_code[SUCCESS])\n else: \n ret = Response(AUTHENTICATION_FAIL, error_code[AUTHENTICATION_FAIL])\n return HttpResponse(ret.serialize(f))\n\n # Generate a token for authentication\n token = token_generator(30)\n try:\n user_token = Token.objects.get(username=data[\"username\"])\n user_token.token = token\n user_token.start_time = datetime.now()\n except: \n user_token = Token(token=token, username=data[\"username\"])\n user_token.save()\n ret.set_ret(\"auth_token\", token) \n user = User.objects.get(username=data[\"username\"])\n ret.set_ret(\"data\", UserSerializer(user.appuser).serialize())\n return HttpResponse(ret.serialize(f))", "def request(self, token):\n pass", "def identify(request):\n return None", "def csrf_protect():\n if request.method == \"POST\" and request.path[0:5] != \"/api/\":\n token = login_session.pop('_csrf_token', None)\n request_token = request.form.get('_csrf_token')\n print(\"Comparing server token [\" + token + \"]\")\n print(\"with client token [\" + request_token + \"]\")\n if not token or token != request_token:\n print(\"Tokens do not match! 
Aborting..\")\n abort(403)\n print(\"Tokens match - accepted\")", "def require_auth(view):\n def wrapper(request, *args):\n \n if not request.session.get('user_id', False):\n return HttpResponseRedirect(\"/clanovi/login/\")\n \n return view(request, *args) \n return wrapper", "def auth_required(self, view):\n\n @functools.wraps(view)\n def decorated(*args, **kwargs):\n log.info(\"Trying to get access to protected resource: '%s'\", view.__name__)\n if request.method == 'POST':\n token = request.form['token']\n if self.development or self.authenticated(token):\n return view(*args, **kwargs)\n else:\n log.warning(\"User has not been authorized to get access to resource: %s\", view.__name__)\n else:\n log.warning(\"Bad request type! Expected 'POST', actual '%s'\", request.method)\n\n return abort(403)\n\n return decorated", "def do_authenticate():\n #try:\n if 1:\n if 'referer' not in self.session:\n path = urlsplit(self.request.url)[2]\n self.session['referer'] = path\n self.session.put()\n #except:\n # pass\n aobj = self.config.auth_obj()\n self.get_controller()\n auth_res = aobj.auth(self.controller, *args, **kws)\n if auth_res:\n return func(*args, **kws)\n aobj.auth_redirect(self.controller, *args, **kws)\n # clear controller for development environment.", "def apply_auth():\n\tclient = BaiduOpenApi()\n\tapi = client.device.code\n\tresp = client.device.code.get(response_type=\"device_code\", scope=\"netdisk\")\n\t# open grant page and wait for user confirm\n\twebbrowser.open_new_tab(r\"http://openapi.baidu.com/device?code=%s\"%resp[\"user_code\"])\n\t# yield to main\n\tyield\n\t# main will tell user to confirm and it will take a while\n\t# polling to wait server back\n\tpolling_tokens(resp[\"device_code\"], resp[\"interval\"], resp[\"expires_in\"])", "def logged_in(view):\n\n @wraps(view)\n def fn(request, secret, *args, **kargs):\n try:\n trader_id, trader_has_not_visited_lately = db.get_loginkey_info(\n hashlib.md5(secret.encode('ascii')).hexdigest() )\n if trader_id:\n if trader_has_not_visited_lately and settings.CMBARTER_MAINTAIN_IP_WHITELIST:\n client_ip = get_client_ip(request)\n if client_ip:\n db.insert_whitelist_entry(trader_id, client_ip)\n # Render the response with some HTTP-headers added.\n response = view(request, secret, trader_id, *args, **kargs)\n if 'Cache-Control' not in response:\n response['Cache-Control'] = 'no-cache, must-revalidate'\n response['Expires'] = 'Mon, 26 Jul 1997 05:00:00 GMT'\n response['Last-Modified'] = datetime.datetime.now(pytz.utc).strftime(\n \"%d %b %Y %H:%M:%S GMT\")\n response['Pragma'] = 'no-cache'\n return response\n else:\n return login(request, method='GET')\n\n except curiousorm.PgError, e:\n if (getattr(e, 'pgcode', '')==curiousorm.RAISE_EXCEPTION and \n A_TURN_IS_RUNNING.search(getattr(e, 'pgerror', ''))):\n return render(request, settings.CMBARTER_TURN_IS_RUNNING_MOBILE_TEMPLATE)\n else:\n raise\n \n return fn", "def gr_init(): # method to establish auth and create cookies (user will go here first)\n\n if request.args.get(\"authorize\") != \"1\":\n request_token, request_token_secret = goodreads.get_request_token(header_auth=True) # request a token to use\n\n authorize_url = goodreads.get_authorize_url(request_token) # url for authorization\n\n res = make_response(render_template(\"gr_redirect.html\", search=None, app_name=app_name, url=authorize_url,\n previous_searches=previous_searches,\n authorized=request.args.get(\"authorize\"),\n oauth_token=None)) # redirect to gr_redirect\n\n res.set_cookie('rt', request_token, max_age=60 * 60 
* 24 * 365 * 2)\n res.set_cookie('rts', request_token_secret,\n max_age=60 * 60 * 24 * 365 * 2) # make cookies using the token information\n else:\n res = make_response(render_template(\"gr_redirect.html\", search=None, app_name=app_name, url=None,\n previous_searches=previous_searches,\n authorized=request.args.get(\"authorize\"),\n oauth_token=request.cookies.get(\"rt\"))) # redirect to gr_redirect\n\n return res # set the cookies and go to gr_redirect.html for redirecting to authorize_url", "def authenticate(self, request):\n return None", "def get(self, request, *args, **kwargs):\n token = request.GET.get('hub.verify_token')\n challenge = request.GET.get('hub.challenge')\n if token == VERIFY_TOKEN:\n if challenge:\n return HttpResponse(challenge)\n else:\n HttpResponse('Error, invalid challenge')\n else:\n return HttpResponse('Error, invalid token')", "def process_view(self, request, view_func, *view_args, **view_kwargs):\n # Nothing to do when not demo mode.\n if not settings.DEMO_MODE:\n return None\n\n if view_func in self.safe_views:\n return None # continue handling request\n return HttpResponseForbidden()", "def get(self, request, *args, **kwargs):\n token = request.GET.get('hub.verify_token')\n challenge = request.GET.get('hub.challenge')\n if token == os.getenv('FB_VERIFY_TOKEN'):\n if challenge:\n return HttpResponse(challenge)\n else:\n HttpResponse('Error, invalid challenge')\n else:\n return HttpResponse('Error, invalid token')", "def prePresent(self, request):", "def before_request():\n user_agent = request.headers.get(\"User-Agent\")\n\n if user_agent is None:\n return \"A user agent must be provided\", 401\n\n lowercase_user_agent = user_agent.lower()\n\n if \"rift\" in lowercase_user_agent:\n logger.debug(\"Detected Rift as user agent (%r)\", user_agent)\n return \"Rift not allowed\", 401\n if \"python\" in lowercase_user_agent:\n logger.debug(\"Detected Python as user agent (%r)\", user_agent)\n return \"Python requests not allowed\", 401\n if \"yandex\" in lowercase_user_agent:\n logger.debug(\"Detected Yandex as user agent (%r)\", user_agent)\n return \"Yandex bots are not allowed\", 401\n if \"smtbot\" in lowercase_user_agent:\n logger.debug(\"Detected SMT as user agent (%r)\", user_agent)\n return \"SMT Bots are not allowed\", 401\n if \"nimbostratus\" in lowercase_user_agent:\n logger.debug(\"Detected Nimbostratus as user agent (%r)\", user_agent)\n return \"Nimbostratus bots are not allowed\", 401\n if \"bot\" in lowercase_user_agent:\n logger.warning(\"Detected unkown bot as user agent (%r)\", user_agent)\n return \"Bots are not allowed\", 401\n if user_agent == \"-\":\n logger.debug(\"Not user agent provided (%r)\", user_agent)\n return \"A user agent must be provided\", 401\n\n return", "def get_user_authorization(request_token):\n authorize_url = AUTHORIZE_URL\n authorize_url = authorize_url.format(request_token=request_token)\n print 'Please go here and authorize: ' + authorize_url\n return raw_input('Please input the verifier: ')", "def EstablishAuthToken(self, opener):\n raise NotImplementedError()", "def _before_request():\n\n g.user = current_user", "def token_browser_profile(request): # URL: 'quest-token-profile'\n m = hashlib.md5()\n m.update('%s |*| %s |*| %s |*| %s' % ( request.POST.get('os', ''),\n request.POST.get('display', ''),\n request.POST.get('software', ''),\n request.POST.get('browser', '')))\n\n profile = Profile(ua_string=request.POST.get('browser', '')[0:255],\n software=request.POST.get('software', '')[0:10000],\n 
os=request.POST.get('os', '')[0:50],\n display=request.POST.get('display', '')[0:255])\n profile.hashid = m.hexdigest()\n profile.save()\n\n request.session['profile'] = profile.hashid\n request.session.save()\n return HttpResponse('K')", "def after_auth(request):\n # get the token and verifier from the URL parameters\n oauth_token, oauth_verifier = request.GET['oauth_token'], request.GET['oauth_verifier']\n \n # retrieve request token stored in the session\n token_in_session = request.session['request_token']\n \n # is this the right token?\n if token_in_session['oauth_token'] != oauth_token:\n return HttpResponse(\"oh oh bad token\")\n \n # get the indivo client and use the request token as the token for the exchange\n client = get_indivo_client(request, with_session_token=False)\n client.update_token(token_in_session)\n access_token = client.exchange_token(oauth_verifier)\n \n # store stuff in the session\n request.session['access_token'] = access_token\n \n if access_token.has_key('xoauth_indivo_record_id'):\n request.session['record_id'] = access_token['xoauth_indivo_record_id']\n if request.session.has_key('carenet_id'):\n del request.session['carenet_id']\n else:\n if request.session.has_key('record_id'):\n del request.session['record_id']\n request.session['carenet_id'] = access_token['xoauth_indivo_carenet_id']\n \n return index(request)", "def negotiate( request, variants ):" ]
[ "0.621399", "0.6102215", "0.6026413", "0.5909619", "0.58216614", "0.5788693", "0.5766099", "0.5750259", "0.57497793", "0.57260203", "0.57218957", "0.5685113", "0.56807905", "0.5671643", "0.56470495", "0.56376934", "0.56284064", "0.56181693", "0.5616399", "0.55913", "0.5590202", "0.55875146", "0.5559966", "0.5556991", "0.55501026", "0.5541836", "0.5537433", "0.553666", "0.55243754", "0.5516401" ]
0.61591715
1
Create a dataset from the given parameters. Note that despite the type hinting, ``domain`` is only semi-optional, as a domain is required to create a dataset.
async def create_dataset(self, dataset_name: str, category: DataCategory, domain: Optional[DataDomain] = None,
                         **kwargs) -> bool:
    # If a domain wasn't passed, generate one from the kwargs, or raise an exception if we can't
    if domain is None:
        data_format = kwargs.pop('data_format', None)
        if data_format is None:
            msg = "Client can't create dataset with `None` for {}, nor generate a default {} without a provided {}"
            raise ValueError(msg.format(DataDomain.__name__, DataDomain.__name__, DataFormat.__name__))
        print_msg = "INFO: no {} provided; dataset will be created with a basic default domain using format {}"
        print(print_msg.format(DataDomain.__name__, data_format.name))
        # If neither provided, bootstrap a basic restriction on the first index variable in the data format
        if not ('discrete_restrictions' in kwargs or 'continuous_restrictions' in kwargs):
            c_restricts = None
            d_restricts = [DiscreteRestriction(variable=data_format.indices[0], values=[])]
        # If at least one is provided, use whatever was passed, and fall back to None for the other if needed
        else:
            c_restricts = list(kwargs.pop('continuous_restrictions')) if 'continuous_restrictions' in kwargs else []
            d_restricts = list(kwargs.pop('discrete_restrictions')) if 'discrete_restrictions' in kwargs else []
        domain = DataDomain(data_format=data_format, continuous_restrictions=c_restricts,
                            discrete_restrictions=d_restricts)
    # Finally, ask the client to create the dataset, passing the details
    return await self.dataset_client.create_dataset(dataset_name, category, domain, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, domain_keys, require_domain=True, datasets=None):\n assert isinstance(domain_keys, list) or isinstance(domain_keys, str)\n if isinstance(domain_keys, list):\n self.domain_keys = domain_keys\n elif isinstance(domain_keys, str):\n self.domain_keys = [x for x in domain_keys.split(',')]\n self.require_domain = require_domain\n self.domain_dict = dict(zip(self.domain_keys, range(len(self.domain_keys))))\n\n if datasets is None:\n datasets = []\n for domain_key in self.domain_keys:\n extra_args = {k: v for dic in [self.domain_specific_params(), self.domain_default_params()] for k, v in dic.items()}\n datasets += [self.get_single_dataset(domain_key, **extra_args)]\n super(DomainDatasetBase, self).__init__(datasets)", "def CreateDataset(all_arrays):\n dataset = Dataset()\n\n dataset._addData(all_arrays[0])\n dataset._addData(all_arrays[1])\n dataset._addData(all_arrays[3])\n dataset._addData(all_arrays[5])\n dataset._addData(all_arrays[6])\n dataset._addData(all_arrays[9])\n dataset._addData(all_arrays[8])\n dataset._addData(all_arrays[4])\n\n return dataset", "def create_dataset(dataset_type, soruce, opts): \n\n p = PreProcessor(dataset_type, opts)\n\n # If we are NOT running \"implementation.py\", we read the data from file\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n path_to_data = soruce\n p.read_labelled_data(path_to_data) \n # Otherwise, we read the sentence that \"implementation.py\" gave us\n elif dataset_type == \"submit\":\n submission_sentence = soruce\n p.read_test_data(submission_sentence)\n\n # Encode all the data to a list of torchTensors\n encoded_tokens, encoded_pred, encoded_tokens_pos, encoded_labels = p.encode_all_data()\n # Create SRL dataset\n dataset = SRLDataset(x=encoded_tokens, pr=encoded_pred, p=encoded_tokens_pos, y=encoded_labels)\n print(\"{} dataset size is {}\".format(dataset_type, len(dataset)))\n\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n return dataset\n elif dataset_type == \"submit\":\n return dataset, p.list_l_original_predicates", "def _create_dataset(self, *data):\n # Make sure data is a tuple of dense tensors\n data = [self._to_torch(x, dtype=torch.FloatTensor) for x in data]\n return TensorDataset(*data)", "def create_dataset(opt):\n\tdata_loader = CustomDatasetDataLoader(opt)\n\tdataset = data_loader.load_data()\n\treturn dataset", "def CreateValidationDataset(all_arrays):\n validation_dataset = Dataset()\n validation_dataset._addData(all_arrays[2])\n validation_dataset._addData(all_arrays[7])\n return validation_dataset", "def get_dataset(args):\n\n if args['experiment']['dataset'] == Dataset.mindsets:\n xs, ys, cs = make_mindsets(mindset_sizes=args['dataset']['mindset_sizes'],\n nb_questions=args['dataset']['nb_questions'],\n nb_useless=args['dataset']['nb_useless'],\n noise=args['dataset']['noise'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.questionnaire_likert:\n xs, ys, cs = make_likert_questionnaire(nb_samples=args['dataset']['nb_samples'],\n nb_features=args['dataset']['nb_features'],\n nb_mindsets=args['dataset']['nb_mindsets'],\n centers=args['dataset']['centers'],\n range_answers=args['dataset']['range_answers'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.retinal:\n xs, ys = load_RETINAL(root_path=args['root_dir'],\n nb_bins=args['dataset']['nb_bins'],\n 
max_idx=args['dataset']['max_idx'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.moons:\n xs, ys = make_moons(n_samples=args['dataset']['n_samples'],\n noise=args['dataset']['noise'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.breast_cancer_wisconsin:\n xs, ys = load_CANCER(args['dataset']['nb_bins'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.SBM:\n A, ys, G = load_SBM(block_sizes=args['dataset']['block_sizes'],\n p_in=args['dataset']['p'],\n p_out=args['dataset']['q'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.gaussian_mixture:\n xs, ys = make_blobs(n_samples=args['dataset']['blob_sizes'],\n centers=args['dataset']['blob_centers'],\n n_features=args['dataset']['blob_centers'],\n cluster_std=args['dataset']['blob_variances'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.LFR:\n A, ys, G = load_LFR(nb_nodes=args['dataset']['nb_nodes'],\n tau1=args['dataset']['tau1'],\n tau2=args['dataset']['tau2'],\n mu=args['dataset']['mu'],\n average_degree=args['dataset']['average_degree'],\n min_community=args['dataset']['min_community'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.wave:\n df = pd.read_csv('datasets/waveform.csv')\n xs = df[df.columns[:-1]].to_numpy()\n ys = df[df.columns[-1]].to_numpy()\n\n return Data(xs=xs, ys=ys)\n\n raise ValueError('Wrong name for a dataset')", "def create_dataset(opt):\n data_loader = CustomDatasetDataLoader(opt)\n dataset = data_loader.load_data()\n return dataset", "def get(dataset_name: str, split: Union[Tuple[str, float], str, tfds.Split],\n **hyperparameters: Any) -> BaseDataset:\n hyperparameters_py = {\n k: (v.numpy().tolist() if isinstance(v, tf.Tensor) else v)\n for k, v in hyperparameters.items()\n }\n logging.info('Building dataset %s with additional kwargs:\\n%s', dataset_name,\n json.dumps(hyperparameters_py, indent=2, sort_keys=True))\n if dataset_name not in DATASETS:\n raise ValueError('Unrecognized dataset name: {!r}'.format(dataset_name))\n\n dataset_class = DATASETS[dataset_name]\n return dataset_class(split=split, **hyperparameters)", "def create_dataset(self, config, rng):\n raise NotImplementedError()", "def _init_dataset(self, data_config, split='train'):\n assert split in {'train', 'valid'}\n\n # load datasets\n print(f'Load {split} dataset')\n if data_config['type'] == 'npy':\n dataset = MSDMelDataset(\n data_config['mel_root'], data_config[f'{split}_tids_fn'],\n data_config['label_fn'], on_mem=data_config['on_mem'],\n ignore_intersection=data_config['ignore_label_intersection'],\n transform=ToVariable())\n\n elif data_config['type'] == 'hdf':\n dataset = MSDMelHDFDataset(\n data_config['hdf_fn'], data_config[f'{split}_tids_fn'],\n data_config['label_fn'],\n ignore_intersection=data_config['ignore_label_intersection'],\n transform=ToVariable())\n\n elif data_config['type'] == 'audio':\n dataset = MSDAudioDataset(\n data_config['audio_root'], data_config[f'{split}_tids_fn'],\n data_config['tid2path_fn'], data_config['label_fn'],\n ignore_intersection=data_config['ignore_label_intersection'],\n device='cpu',\n transform=ToVariable())\n\n return dataset", "def make_dataset(arguments):\n connections.create_connection(hosts=ES_HOSTS, timeout=9999, http_auth=ES_LOGIN)\n time_start = 
time.time()\n\n # cleanup of the invalid dataset duplicate links\n if arguments.clean:\n dataset_source_cleanup()\n\n # reset dataset assignments of posts with given roles\n if arguments.reset is not None and len(arguments.reset) > 0:\n reset_dataset_flags(arguments.reset)\n\n # create dataset base from main posts and their duplicates\n if arguments.base:\n link_search = PostLink.search().filter(\"term\", link_type=3).params(scroll=\"1440m\")\n links = link_search.scan()\n\n time_start_partial = time.time()\n print(\"Creating dataset base from duplicates ...\")\n for i, link in enumerate(links):\n add_main_post_into_ds(Post.get_post(link.post_ID, link.page))\n if i % 10 == 0:\n print(f\"\\r Processing - {i}\", end=\"\")\n time_partial = time.time() - time_start_partial\n print(f\" {int(time_partial / 60)} min, {int(time_partial % 60)} s\")\n print()\n\n # search and assign similar posts to all main posts in dataset\n if arguments.similar is not None:\n time_start_partial = time.time()\n print(\"Getting similar posts for posts in dataset base ...\")\n print(f\" Part: {arguments.similar}\")\n pool = Pool(PARALLEL_SLICES)\n pool.map(process_similar_posts, range(PARALLEL_SLICES))\n\n time_partial = time.time() - time_start_partial\n print(f\" {int(time_partial / 60)} min, {int(time_partial % 60)} s\")\n print()\n\n # export the dataset to CSV file\n if arguments.export:\n time_start_partial = time.time()\n print(\"Exporting whole dataset to general csv...\")\n export_dataset_to_csv()\n time_partial = time.time() - time_start_partial\n print(f\" {int(time_partial / 60)} min, {int(time_partial % 60)} s\")\n\n time_start_partial = time.time()\n print(\"Shuffling and splitting the general csv into train, dev and test parts...\")\n shuffle_and_split(DS_EXPORT_FILE)\n time_partial = time.time() - time_start_partial\n print(f\" {int(time_partial / 60)} min, {int(time_partial % 60)} s\")\n\n time_end = time.time()\n time_total = time_end - time_start\n print(\"Dataset created successfully ...\")\n print(f\"Dataset creation process took {int(time_total / 60)} min and {int(time_total % 60)} seconds\")", "def create_dataset(name, grid=None, samples=1000, seed=0):\n if grid == None:\n np.random.seed(seed)\n points = 1 - 2 * np.random.rand(samples, 2)\n else:\n x = np.linspace(-1, 1, grid)\n points = np.array(list(product(x, x)))\n creator = globals()[f\"_{name}\"]\n\n x, y = creator(points)\n return x, y", "def dataset(request):\n X, y = make_classification(\n n_samples=700, n_features=10, n_informative=8, n_redundant=2,\n n_classes=2, n_clusters_per_class=2, random_state=6483\n )\n\n request.cls.dataset = Dataset(X, y)", "def provide_domained_dataset(tfds_name,\n batch_size,\n patch_size,\n split='train',\n num_parallel_calls=None,\n shuffle=True,\n domains=('Black_Hair', 'Blond_Hair', 'Brown_Hair'),\n download=True,\n data_dir=None):\n # ds = tfds.load('celeb_a', split=split, shuffle_files=shuffle)\n print(\"[**] Load tf data source: {tfdata_source}\".format(tfdata_source=tfds_name))\n ds = tfds.load(tfds_name, split=split, shuffle_files=shuffle, download=download, data_dir=data_dir)\n\n def _filter_pred(attribute):\n def _filter(element):\n return element['attributes'][attribute]\n return _filter\n dss = tuple([ds.filter(_filter_pred(attribute)) for attribute in domains])\n ds = tf.data.Dataset.zip(dss)\n\n def _preprocess(*elements):\n \"\"\"Map elements to the example dicts expected by the model.\"\"\"\n output_dict = {}\n num_domains = len(elements)\n for idx, (domain, elem) in 
enumerate(zip(domains, elements)):\n uint8_img = elem['image']\n patch = data_provider.full_image_to_patch(uint8_img, patch_size)\n label = tf.one_hot(idx, num_domains)\n output_dict[domain] = {'images': patch, 'labels': label}\n return output_dict\n\n ds = (ds\n .map(_preprocess, num_parallel_calls=num_parallel_calls)\n .cache()\n .repeat())\n if shuffle:\n ds = ds.shuffle(buffer_size=10000, reshuffle_each_iteration=True)\n ds = (ds\n .batch(batch_size, drop_remainder=True)\n .prefetch(tf.data.experimental.AUTOTUNE))\n\n return ds", "def dynamic(seq: List[int]):\n return Data._create_dataset(seq, pad=False)", "def make_dataset(dataset_name):\n return {\n\n 'duc': DUCDataset(),\n\n 'icsi-asr': ICSIASRDataset(),\n 'icsi-ht': ICSIHumanTranscriptDataset(),\n\n 'inspec-train': InspectTrainingDataset(),\n 'inspec-val': InspectValidationDataset(),\n 'inspec-test': InspectTestDataset(),\n\n 'nus': NUSDataset()\n\n }[dataset_name]", "def _create_dataset(source=''):\n return ExperimentalDataset()", "def get_dataset(opts):\n dataset_type = opts.dataset_params.dataset_type\n if dataset_type in 'synth':\n return synthgraph.SynthGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'synthnoise':\n return synthgraph.SynthNoiseGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'synthoutlier':\n return synthgraph.SynthOutlierGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'rome16kgeom':\n return spreal.GeomKNNRome16KDataset(opts, opts.dataset_params)\n elif dataset_type in 'graffiti':\n return graffiti.GraffitiDataset(opts, opts.dataset_params)\n else:\n print(\"ERROR: Dataset type {} not implemented yet\".format(dataset_type))\n sys.exit(1)", "def create_dataset(dims, size, num_clusters=20):\n clusters, delta = gen_k_centers(num_clusters, dims)\n return _create_constrained_dataset(clusters, delta, size)", "def _create_dataset(batch_size):\n ds = collections.OrderedDict([('x', [[-1.0, -1.0], [1.0, 1.0], [1.0, 1.0]]),\n ('y', [[1.0], [1.0], [1.0]])])\n # Note: batching is needed here as it creates the required batch dimension.\n # The batch size can be re-set (by `unbatch()` first) in personalization.\n return tf.data.Dataset.from_tensor_slices(ds).batch(batch_size)", "def prepare_dataset(self, xs: List[str], ys: List[str], batch_size: int = None):\n\n if batch_size is None:\n batch_size = self.cM.batch_size\n\n examples = [data.Example.fromlist([x, y], self.data_fields) for x, y in zip(xs, ys)]\n\n dataset = data.Dataset(examples, fields=self.data_fields)\n\n iterator = data.BucketIterator(dataset, batch_size=batch_size, shuffle=False)\n\n return iterator", "def make_dataset(condition, root, base_path, files_json_path):\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n dataset = ZapposDataset(root, base_path, files_json_path, condition,\n transform=transforms.Compose([\n transforms.Scale(112),\n transforms.CenterCrop(112),\n transforms.ToTensor(),\n normalize,\n ]))\n return dataset", "def init_existing_dataset(directory, source_datasets, uuid_=None, source_hostname=None):\n md = ptype.DatasetMetadata(\n id_=uuid_,\n # Default creation time is creation of an image.\n creation_dt=datetime.datetime.utcfromtimestamp(directory.stat().st_ctime),\n lineage=ptype.LineageMetadata(\n machine=ptype.MachineMetadata(\n hostname=source_hostname\n ),\n source_datasets=source_datasets\n )\n )\n _note_package_vers(md)\n return md", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n 
train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):\n super(Domain, self).__init__(\"domain\")\n\n # Validate arg\n # ------------\n if arg is None:\n arg = {}\n elif isinstance(arg, self.__class__):\n arg = arg.to_plotly_json()\n elif isinstance(arg, dict):\n arg = _copy.copy(arg)\n else:\n raise ValueError(\n \"\"\"\\\nThe first argument to the plotly.graph_objs.table.Domain \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.table.Domain`\"\"\"\n )\n\n # Handle skip_invalid\n # -------------------\n self._skip_invalid = kwargs.pop(\"skip_invalid\", False)\n\n # Import validators\n # -----------------\n from plotly.validators.table import domain as v_domain\n\n # Initialize validators\n # ---------------------\n self._validators[\"column\"] = v_domain.ColumnValidator()\n self._validators[\"row\"] = v_domain.RowValidator()\n self._validators[\"x\"] = v_domain.XValidator()\n self._validators[\"y\"] = v_domain.YValidator()\n\n # Populate data dict with properties\n # ----------------------------------\n _v = arg.pop(\"column\", None)\n self[\"column\"] = column if column is not None else _v\n _v = arg.pop(\"row\", None)\n self[\"row\"] = row if row is not None else _v\n _v = arg.pop(\"x\", None)\n self[\"x\"] = x if x is not None else _v\n _v = arg.pop(\"y\", None)\n self[\"y\"] = y if y is not None else _v\n\n # Process unknown kwargs\n # ----------------------\n self._process_kwargs(**dict(arg, **kwargs))\n\n # Reset skip_invalid\n # ------------------\n self._skip_invalid = False", "def simple_dataset() -> Dataset:\n graph = Dataset()\n graph.default_context.add((EGSCHEME.subject, EGSCHEME.predicate, EGSCHEME.object))\n graph.default_context.add((EGURN.subject, EGURN.predicate, EGURN.object))\n graph.default_context.add((EGDC.subject, EGDC.predicate, Literal(\"typeless\")))\n graph.get_context(EGSCHEME.graph).add(\n (EGSCHEME.subject, EGSCHEME.predicate, EGSCHEME.object)\n )\n graph.get_context(EGSCHEME.graph).add(\n (EGSCHEME.subject, EGSCHEME.predicate, Literal(12))\n )\n graph.get_context(EGSCHEME.graph).add(\n (\n EGDC.subject,\n EGDC.predicate,\n Literal(\"日本語の表記体系\", lang=\"jpx\"),\n )\n )\n graph.get_context(EGSCHEME.graph).add(\n (EGURN.subject, EGSCHEME.predicate, EGSCHEME.subject)\n )\n graph.get_context(EGURN.graph).add(\n (EGSCHEME.subject, EGSCHEME.predicate, EGSCHEME.object)\n )\n graph.get_context(EGURN.graph).add((EGSCHEME.subject, EGDC.predicate, EGDC.object))\n graph.get_context(EGURN.graph).add(\n (EGSCHEME.subject, EGDC.predicate, Literal(\"XSD string\", datatype=XSD.string))\n )\n return graph", "def create_dataset(dataset_name):\n dataset_as_lower = dataset_name.lower()\n if dataset_as_lower in _datasets_from_keras.keys():\n data_details = _datasets_from_keras[dataset_as_lower]\n (x_train, y_train), (x_test, y_test) = data_details['data'].load_data()\n else:\n raise IOError(\"Dataset {0} is NOT supported\".format(dataset_name))\n\n # Performing pre-processing specifically for images datasets.\n if data_details['data type'] == 'image':\n x_train = _pre_process_images(x_train, data_details)\n x_test = _pre_process_images(x_test, data_details)\n\n return x_train, y_train, x_test, y_test", "def _dataset_fn(ctx=None):\n batch_size = ctx.get_per_replica_batch_size(\n global_batch_size) if ctx else global_batch_size\n dataset = 
input_pipeline.create_classifier_dataset(\n input_file_pattern,\n max_seq_length,\n batch_size,\n is_training=is_training,\n input_pipeline_context=ctx)\n return dataset", "def create_domain(DomainName=None):\n pass" ]
[ "0.70908016", "0.6585426", "0.6346205", "0.6299749", "0.6184952", "0.6145635", "0.6139993", "0.61318284", "0.6127892", "0.6063784", "0.5988405", "0.59247696", "0.5912318", "0.5860009", "0.5854653", "0.5846334", "0.5820806", "0.58134025", "0.5787504", "0.577296", "0.57687443", "0.57588536", "0.5757791", "0.575617", "0.57523555", "0.5750968", "0.57500625", "0.57089716", "0.5703866", "0.5694701" ]
0.68479204
1
Request the status of the provided job, represented in string form.
async def request_job_status(self, job_id: str, *args, **kwargs) -> str: # TODO: implement raise NotImplementedError('{} function "request_job_status" not implemented yet'.format(self.__class__.__name__))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def job_status(self, job_id):\n url = self.base_url + \"/ml-service/phoenix-ml/job/status?id={0}\".format(job_id)\n # url = \"http://10.1.2.110:8199/phoenix-ml/job/status?id=12\"\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response.text", "def _check_job_status(self) -> str:\n self._assert_job_created()\n\n r = requests.post(\n f'https://{cc.ROUTE_PREFIX}.stratodem.com/jobs/status',\n headers=dict(\n Authorization=f'Bearer {get_api_token()}',\n ),\n json=dict(job_id=self._job_id)\n )\n\n if not r.status_code == 200:\n raise APIQueryFailedException('Failed to determine job status')\n\n r = r.json()\n\n if not r['success']:\n raise APIQueryFailedException(r)\n else:\n return r['message']", "def jobStatus(self, jobId):\n params = {'id': jobId}\n try:\n resp = self.gc.get(JobUtils.JOB_ID_PATH, parameters=params)\n except HttpError as e:\n if e.status == 400:\n print('Error. invalid job id:', jobId)\n return {}\n raise\n\n if not resp:\n return ''\n\n status = resp.get('status')\n\n statusStr = JobUtils.getJobStatusStr(status)\n return statusStr", "def request_status(job_id):\n status = _database_operations.get_status(job_id, Session())\n if status is None:\n flask.abort(404)\n else:\n return json.dumps({\n 'status': status.status,\n 'finished': status.finished\n })", "def get_status(job_id):\n job = fetch_data.AsyncResult(job_id, app=app)\n return jsonify({'job_id': job_id, 'status': job.status})", "def get_status(self):\n url = \"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return self.vera.get(url)", "def status(self, job_id: str) -> dict:\n session = self._session()\n response = session.get(self._status_url(job_id))\n if response.ok:\n fields = [\n 'status', 'message', 'progress', 'createdAt', 'updatedAt', 'request',\n 'numInputGranules'\n ]\n status_subset = {k: v for k, v in response.json().items() if k in fields}\n return {\n 'status': status_subset['status'],\n 'message': status_subset['message'],\n 'progress': status_subset['progress'],\n 'created_at': dateutil.parser.parse(status_subset['createdAt']),\n 'updated_at': dateutil.parser.parse(status_subset['updatedAt']),\n 'request': status_subset['request'],\n 'num_input_granules': int(status_subset['numInputGranules']),\n }\n else:\n response.raise_for_status()", "def get_job_status(jobid, wait=30):\n cmd = \"scontrol show job {0}\".format(jobid)\n try:\n output = subprocess.check_output(cmd, shell=True)\n m = re.search(\"JobState=(\\w+)\", output)\n except subprocess.CalledProcessError:\n m = False\n\n status = None\n if m:\n status = m.group(1)\n else:\n repeat = 0\n while not m and repeat < wait:\n cmd = \"sacct -b -j {0}\".format(jobid)\n output = subprocess.check_output(cmd, shell=True)\n m = re.search(\"{0}\\s+([A-Z]+)\".format(jobid), output)\n time.sleep(1)\n repeat += 1\n if m:\n status = m.group(1)\n\n if status is None:\n raise ValueError(\"Job not found: {0}\".format(jobid))\n else:\n return status", "def get_job_status(self, job, context=None):\n return self._client.call_method(\n 'UserAndJobState.get_job_status',\n [job], self._service_ver, context)", "def get(self, job_id):\n\n if job_id:\n status = {\"state\": self.runner_service.status(job_id)}\n else:\n # TODO: Update the correct status for all jobs; the filtering in jobrunner doesn't work here.\n all_status = self.runner_service.status_all()\n status_dict = {}\n for k, v in all_status.iteritems():\n status_dict[k] = {\"state\": v}\n status = status_dict\n\n self.write_json(status)", "def 
get_status(self, job_id):\n\n result = self.redis.get('job_status:' + str(job_id))\n return pickle.loads(result) if result else None", "def get_status(job_key):\n job = Job.fetch(job_key, connection=conn)\n\n logs_url = \"{}{}/runner/logs/{}\".format(request.url_root, API_VERSION, job_key)\n status_dict = {\"status\": \"\", \"logs_url\": logs_url}\n return_code = 200\n if job.is_finished:\n status_dict['status'] = \"success\"\n return_code = 200\n elif job.is_failed:\n status_dict['status'] = \"terminal\"\n return_code = 400\n else:\n status_dict['status'] = \"running\"\n status_dict['logs_url'] = \"\"\n return_code = 202\n\n return jsonify(status_dict), return_code", "def test_cachedjob_get_status(cached_job):\n \n # Setup\n c_job = cached_job\n \n # Execute\n expected_status = StatusEnum(JOB_DETAILS_HTML['status'])\n cached_status = c_job.status\n\n # Verify\n assert expected_status == cached_status", "def job_status(self, job_id):\n\n response = self.batch_client.describe_jobs(jobs=[job_id])\n return response[\"jobs\"][0][\"status\"]", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "def getJobStatusStr(status):\n if not isinstance(status, int):\n return ''\n\n return JobUtils.JOB_STATUS.get(status, '')", "def check_job_status(self, jobid=None):\n\n if jobid is None:\n if hasattr(self, 'current_job'):\n jobid = self.current_job\n else:\n jobid = self.current_job\n\n response = self._request(\n 'GET', CosmoSim.QUERY_URL + '/{}'.format(jobid) + '/phase',\n auth=(self.username, self.password), data={'print': 'b'},\n cache=False)\n\n log.info(\"Job {}: {}\".format(jobid, response.content))\n return response.content", "def status(self) -> str:\n return self._check_job_status()", "def job_status(bot, update, args, job_queue, chat_data):\n if len(args) == 0:\n update.message.reply_text('No parameter provided')\n return\n\n job_name = args[0]\n if job_name not in settings.JOBS:\n update.message.reply_text(\n 'Sorry {0} is not a valid job'.format(job_name))\n return\n\n job = find_job(job_name, job_queue)\n\n if not job:\n update.message.reply_text('{0} job is not running'.format(job_name))\n return\n\n update.message.reply_text('{0} job is running'.format(job_name))", "def statusJob(self, job):\n with self.thread_lock:\n name = job.name\n job_container = self.shared_dags[job]\n job_dag = job_container.getDAG()\n\n # If there is no timing, then the job is not finished\n if job_container.getTime():\n job_container.addCaveat('time: ' + job_container.getTime())\n if job.getResult() == False:\n self.active.remove(job)\n self.killJobs()\n return\n else:\n self.job_queue_count -= 1\n job_dag.delete_node(job)\n self.active.remove(job)\n if self.args.download_only:\n result = ' -Downloaded | '\n else:\n result = ' --Finished | '\n\n else:\n result = ' Launching | '\n\n # Format job name length field\n name_cnt = (self.term_width - len(job.name)) + 2 # 2 character buffer\n result = strftime(\"%H:%M\") + result + job.name + ' '*name_cnt\n\n # Format caveat length\n caveats = job_container.getCaveats()\n caveat_cnt = self.max_caveat_length - len(caveats)\n\n if caveats:\n result = result + caveats + ' '*caveat_cnt\n else:\n result = result + ' '*caveat_cnt\n\n remaining = job_dag.size()\n print(result, \"remaining: %-3d active: %-2d\" % (remaining, len(self.active)), [x.name for x in self.active])", "def status(self):\n return self.job_proto.status", "def get_async_job_status(self, job_id, batch=False):\n path = '%s' % job_id\n return self.make_request(path, 'GET', 
batch=batch)", "def GetJobStatus(self, job_id):\n return self._SendRequest(HTTP_GET,\n \"/%s/jobs/%s\" % (GANETI_RAPI_VERSION, job_id),\n None, None)", "def job_status(job_id):\n job_db = JobDb()\n job = job_db.get_job_by_id(job_id)\n job_db.close()\n\n if job is None:\n raise ApiError(\n \"job_not_found\",\n f\"Job '{job_id}' not found\",\n 404)\n\n job['duration'] = str(datetime.timedelta(\n seconds=int((job['updated'] - job['created']).total_seconds())))\n return jsonify(job)", "def _get_job_status(self):\n total_hits = session.query(BoxHit).filter_by(training_job_id=self.id).count()\n num_hits_left = session.query(BoxHit).filter_by(training_job_id=self.id, outstanding=True).count()\n total_urls = self.num_urls\n num_urls_left = session.query(VideoTrainingURL).filter_by(job=self, processed=False).count()\n faces_obtained = MTurkBox.query.filter_by(label=self.evaluator.target_label, result=True).count()\n return '\\n'.join([\n '------------- Stats for Job ID: %s -------------' % str(self.id) ,\n 'Job for Label : %s' % self.label.name,\n 'Total URLs : %d' % total_urls,\n 'Total HITs : %d' % total_hits,\n 'unprocessed URLS : %d' % num_urls_left,\n 'outstanding Hits : %d' % num_hits_left,\n 'Job Finish Status : %s' % self.finished,\n 'Faces Obtained : %d' % faces_obtained,\n ]) + '\\n'", "def check_status(self, job_id, config_id=1):\n response = self.do_request(\n self.base_url +\n \"/oasis/statusAsync/\" +\n str(config_id) + \"/\" +\n str(job_id) + \"/\"\n )\n return response", "def get_job_status(job_url, build_number, username, password):\n try:\n url = \"{}{}/api/json\".format(job_url, str(build_number))\n res = requests.get(url, auth=(username, password))\n build_status_json = json.loads(res.text)\n return build_status_json[\"result\"]\n\n except requests.exceptions.RequestException as e:\n print (e)\n sys.exit(2)", "def get_job_state(self, response) -> Text:\n return response['state']", "def get_status(item_id: str, job_id: str):\n url = \"%s/content/users/%s/items/%s/status/\" % (\n root_uri, username, item_id)\n data = {\n \"token\": token,\n \"jobType\": \"export\",\n \"jobId\": job_id,\n \"f\": \"json\"\n }\n status_request = requests.post(url, data=data)\n return status_request.json()", "def result(self, job):\n\n assert isinstance(job, six.string_types)\n\n try:\n response = requests.get('{}/api/v1/result/{}'.format(self.URL, job))\n except (Timeout, ConnectionError):\n raise ServiceError('Service unavailable: timeout.', 4)\n\n result = self._validate(response)\n data = result.get('state')\n state = State.from_dict(data) if data else None\n\n if state is not None:\n self.__previous_job = self.__current_job\n self.__current_job = None\n\n return result.get('status'), state" ]
[ "0.7616898", "0.74997103", "0.7483296", "0.7299432", "0.7210846", "0.7101452", "0.70003635", "0.6996187", "0.69212604", "0.68891513", "0.68681103", "0.6823939", "0.682249", "0.6807101", "0.68042487", "0.6784051", "0.67744637", "0.6770887", "0.67177606", "0.6702504", "0.66799605", "0.66681314", "0.6650797", "0.6614906", "0.6613384", "0.66089326", "0.6569058", "0.6538966", "0.650529", "0.6503313" ]
0.7644943
0
Request the provided job be stopped; i.e., transitioned to the ``STOPPED`` exec step.
async def request_job_stop(self, job_id: str, *args, **kwargs) -> bool: # TODO: implement raise NotImplementedError('{} function "request_job_stop" not implemented yet'.format(self.__class__.__name__))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop_job(self):\n # DELETE /jobs/{job_id}/results\n pass", "def stop(self) -> None:\n self._client.terminate_job(jobId = self.id, reason = self.STOP_REASON)", "def stop_training_job(TrainingJobName=None):\n pass", "def stop_transform_job(TransformJobName=None):\n pass", "def stop_labeling_job(LabelingJobName=None):\n pass", "def job_stop(self, job_id):\n resp = self.backend.job_stop(job_id)\n\n self.refresh_jobs()", "def stop_compilation_job(CompilationJobName=None):\n pass", "def stop(self, bGraceful = False):\n\t\treturn Job(SDK.PrlVm_Stop(self.handle, bGraceful)[0])", "def stop_text_translation_job(JobId=None):\n pass", "def stopJob(self):\n if len(self.__jobQueue) > 0:\n _JobThread.stopJobThreadInstance(\n self.caller, self.__jobQueue[0].stopRun)", "def request_stop(self):\n self._messaged.emit((\"stop\",None,0,None))", "def stop(instance):\n if instance.state == STOPPED:\n return\n\n Queue.objects.add(function=\"terminate\", instance=instance)", "def stop(self):\n self.requested_state = 'Stopped'\n self.ml_interface.stop()", "def stop(\n address: Optional[str],\n no_wait: bool,\n job_id: str,\n headers: Optional[str],\n verify: Union[bool, str],\n):\n client = _get_sdk_client(address, headers=headers, verify=verify)\n cli_logger.print(f\"Attempting to stop job '{job_id}'\")\n client.stop_job(job_id)\n\n if no_wait:\n return\n else:\n cli_logger.print(\n f\"Waiting for job '{job_id}' to exit \" f\"(disable with --no-wait):\"\n )\n\n while True:\n status = client.get_job_status(job_id)\n if status in {JobStatus.STOPPED, JobStatus.SUCCEEDED, JobStatus.FAILED}:\n _log_job_status(client, job_id)\n break\n else:\n cli_logger.print(f\"Job has not exited yet. Status: {status}\")\n time.sleep(1)", "async def stop(self):\n self._job.cancel()\n await super().stop()", "def vm_stop(self, params: dict) -> Tuple[\"Status\", dict]:", "def stop(self):\n self._context.state = STOPPED", "def cli(ctx, job_id):\n return ctx.gi.jobs.cancel_job(job_id)", "def test_step_stop_aborted(self, _step: PropertyMock):\n _step.return_value = None\n es = exposed.ExposedStep()\n es.stop()", "def stop_batch_job(self, name, error_on_stopped=False):\n if name not in self.batch_jobs:\n raise ValueError(\"job {} doesn't exists\".format(name))\n if name not in self.jobs:\n if error_on_stopped:\n raise ValueError(\"job {} doesn't exists\".format(name))\n return\n self.remove_job(name)\n _,args,kwargs,cleanup=self._batch_jobs_args.pop(name)\n if cleanup:\n cleanup(*args,**kwargs)", "async def kill(self):\n if self._state in (JobState.PENDING, JobState.RUNNING):\n await self._process.kill()\n else:\n raise JobInvalidStateError('job is not running')", "def stop_workflow_execution(self, cntx, **kwargs):\n execution_id = kwargs.get('execution_id')\n\n return db_api.execution_update(execution_id,\n {\"state\": states.STOPPED})", "def kill_job(self, job):\n\n if job.status == Job.STATUS_QUEUED:\n # case 1: job is in QUEUED state\n # remove it from the queue and mark as killed\n\n job_queue = job_queue_name(job.model)\n logger.info(\n \"killing job {} by removing from queue {}\".\n format(job.uuid, job_queue))\n\n command_dict = {'command': 'PROCESS_JOB', 'job_uuid': job.uuid}\n remove_command(redis_connection(), job_queue, command_dict)\n job.status = Job.STATUS_KILLED\n # save it\n Job[job.uuid] = job\n elif job.status == Job.STATUS_RUNNING:\n # case 2: job is in RUNNING state\n # send message to worker to kill the job\n worker = worker_name(job.worker_url, job.model)\n worker_channel = node_channel_name(worker)\n 
logger.info(\"sending command to kill job on channel {}\".\n format(worker_channel))\n command_dict = {'command': \"KILL_JOB\", 'job_uuid': job.uuid}\n publish_command(redis_connection(), worker_channel, command_dict)\n else:\n logger.info(\"kill called on job {} in incompatible state {}\".\n format(job.uuid, job.status))", "def run_job(job, interrupt_if_necessary):", "def stop_run(arn=None):\n pass", "def kill(self):\n return self._raw_execute(\"cancel\", {\"job_id\": self.job_id})", "def stop_procedure(self):\n pass", "def post(self, job_id):\n try:\n if job_id == \"all\":\n log.info(\"Attempting to stop all jobs.\")\n self.runner_service.stop_all()\n log.info(\"Stopped all jobs!\")\n self.set_status(200)\n elif job_id:\n log.info(\"Attempting to stop job: {}\".format(job_id))\n self.runner_service.stop(job_id)\n self.set_status(200)\n else:\n ArteriaUsageException(\"Unknown job to stop\")\n except ArteriaUsageException as e:\n log.warning(\"Failed stopping job: {}. Message: \".format(job_id, e.message))\n self.send_error(500, reason=e.message)", "def _stop_if_active(self):\n if self.training_job_id is None:\n return\n\n job = self._get_training_job()\n if job.get('status') not in (\n TrainingJobStatus.STOPPED.value,\n TrainingJobStatus.COMPLETED.value,\n TrainingJobStatus.FAILED.value\n ):\n self._update_status({\n 'status': 'Stopped',\n 'completion_time': datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S\")\n })\n self.logger.info('stop training job {}'.format(self.training_job_id))", "def stop_hyper_parameter_tuning_job(HyperParameterTuningJobName=None):\n pass" ]
[ "0.70777583", "0.70626056", "0.7052685", "0.68527895", "0.6800575", "0.6784599", "0.6706217", "0.6705649", "0.66965103", "0.6625707", "0.6621099", "0.6580438", "0.65283185", "0.6504719", "0.64555734", "0.6401517", "0.6334278", "0.6256518", "0.6234194", "0.62264514", "0.62126917", "0.61838275", "0.6144664", "0.6135996", "0.61323047", "0.61208487", "0.6116946", "0.611238", "0.6104475", "0.6098565" ]
0.71144575
0
Read a blockchain file and store blocks into a list
def loadBlockchain(path): list = [] filename = Blockchain(0, '0', 0, 0, 0).getFilename() f = open(path) for line in f: if line == '\n': continue linesplit = line[:-1].split(';') b = Blockchain(int(linesplit[0]), linesplit[1], int(linesplit[2]), int(linesplit[3]) ,linesplit[4]) list.append(b) f.close() return list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self):\n try:\n with open(\"blockchain.txt\", mode=\"r\") as f:\n file_content = f.readlines()\n blockchain = json.loads(file_content[0][:-1])\n # OrderedDict\n updated_blockchain = []\n for block in blockchain:\n converted_transfers = [\n Transfer(tx[\"user\"], tx[\"signature\"], tx[\"amount\"])\n for tx in block[\"transfers\"]\n ]\n # converted_transfers = [OrderedDict(\n # [('user', tx['user']), ('amount', tx['amount'])]) for tx in block['transfers']]\n updated_block = Block(\n block[\"index\"],\n block[\"previous_hash\"],\n converted_transfers,\n block[\"proof\"],\n block[\"timestamp\"],\n )\n updated_blockchain.append(updated_block)\n self.__chain = updated_blockchain\n open_transfers = json.loads(file_content[1][:-1])\n # OrderedDict\n updated_transfers = []\n for tx in open_transfers:\n updated_transfer = Transfer(\n tx[\"user\"], tx[\"signature\"], tx[\"amount\"]\n )\n # updated_transfer = OrderedDict(\n # [('user', tx['user']), ('amount', tx['amount'])])\n updated_transfers.append(updated_transfer)\n self.__open_transfers = updated_transfers\n peer_nodes = json.loads(file_content[2])\n self.__peer_nodes = set(peer_nodes)\n\n except (IOError, IndexError):\n pass", "def load_data(self):\n try:\n with open('blockchain-{}.txt'.format(self.node_id), mode='r') as f:\n file_content = f.readlines()\n blockchain = json.loads(file_content[0][:-1])\n updated_blockchain = []\n for block in blockchain:\n converted_tx = [Transaction(\n tx['sender'], tx['recipient'], tx['signature'], tx['amount']) for tx in block['transactions']]\n converted_chip = [Chipsaction(\n tx['sender'], tx['recipient'], tx['follow'], tx['message'], tx['signature'], tx['amount']) for tx in block['chipsactions']]\n converted_message = [Messsaction(\n tx['sender'], tx['follower'], tx['message'], tx['signature']) for tx in block['messsactions']]\n updated_block = Block(\n block['index'], block['previous_hash'], converted_tx, converted_chip, converted_message, block['proof'], block['timestamp'])\n updated_blockchain.append(updated_block)\n self.chain = updated_blockchain\n\n open_transactions = json.loads(file_content[1][:-1])\n # need to convert the loaded data because Transactions should use OrderedDict\n updated_transactions = []\n for tx in open_transactions:\n updated_transaction = Transaction(\n tx['sender'], tx['recipient'], tx['signature'], tx['amount'])\n updated_transactions.append(updated_transaction)\n self.__open_transactions = updated_transactions\n\n open_chipsactions = json.loads(file_content[2][:-1])\n # need to convert the loaded data because Chipsactions should use OrderedDict\n updated_chipsactions = []\n for tx in open_chipsactions:\n updated_chipsaction = Chipsaction(\n tx['sender'], tx['recipient'], tx['follow'], tx['message'], tx['signature'], tx['amount'])\n updated_chipsactions.append(updated_chipsaction)\n self.__open_chipsactions = updated_chipsactions\n\n open_messsactions = json.loads(file_content[3][:-1])\n # need to convert the loaded data because Messsactions should use OrderedDict\n updated_messsactions = []\n for tx in open_messsactions:\n updated_messsaction = Messsaction(\n tx['sender'], tx['follower'], tx['message'], tx['signature'])\n updated_messsactions.append(updated_messsaction)\n self.__open_messsactions = updated_messsactions\n\n peer_nodes = json.loads(file_content[4])\n self.__peer_nodes = set(peer_nodes)\n except (IOError, IndexError):\n pass\n finally:\n print('Cleanup!')", "def blocks_read(file, filesize):\n # core.PACKET_SIZE = getPacketSize(filesize, 
args.blocks)\n blocks_n = math.ceil(filesize / core.PACKET_SIZE)\n blocks = []\n\n # Read data by blocks of size core.PACKET_SIZE\n for i in range(blocks_n):\n \n data = bytearray(file.read(core.PACKET_SIZE))\n\n if not data:\n raise \"stop\"\n\n # The last read bytes needs a right padding to be XORed in the future\n if len(data) != core.PACKET_SIZE:\n data = data + bytearray(core.PACKET_SIZE - len(data))\n assert i == blocks_n-1, \"Packet #{} has a not handled size of {} bytes\".format(i, len(blocks[i]))\n\n # Paquets are condensed in the right array type\n blocks.append(np.frombuffer(data, dtype=core.NUMPY_TYPE))\n return blocks", "def create_or_read_file(self):\n # make sure the 'files' directory exists\n if not os.path.isdir('../files'):\n os.mkdir('../files')\n try:\n # try to read in files from disk if they exist\n read_file = open(self.pickle_path, 'rb')\n self.blockchain = pickle.load(read_file)\n read_file.close()\n # print('blockchain loaded from file')\n except FileNotFoundError:\n # if no blockchain exists, initialize one with the genesis block\n self.blockchain = [ # Genesis block! as the first block in the chain the hashes are predetermined.\n Block(\n index=0,\n timestamp=str(datetime.datetime.now()),\n transactions=[]\n )\n ]\n self.write_to_disk()", "def deserialize(self, reader: serialization.BinaryReader) -> None:\n super(Block, self).deserialize(reader)\n content_count = reader.read_var_int(max=self.MAX_CONTENTS_PER_BLOCK)\n if content_count == 0:\n raise ValueError(\"Deserialization error - no contents\")\n\n self.consensus_data = reader.read_serializable(payloads.ConsensusData)\n tx_count = content_count - 1\n for _ in range(tx_count):\n self.transactions.append(reader.read_serializable(payloads.Transaction))\n\n if len(set(self.transactions)) != tx_count:\n raise ValueError(\"Deserialization error - block contains duplicate transaction\")\n\n hashes = [t.hash() for t in self.transactions]\n if Block.calculate_merkle_root(self.consensus_data.hash(), hashes) != self.merkle_root:\n raise ValueError(\"Deserialization error - merkle root mismatch\")", "def readBlocks(self):\n self.data_block_list = []\n self.data_block_list.append(Rhd2000DataBlock(self))\n #read data blocks untill the EOF\n while True:\n try:\n self.data_block_list.append(Rhd2000DataBlock(self))\n except:\n break", "def get_blocks():\n chain_to_send = blockchain\n blocklist = \"\"\n for i in range(len(chain_to_send)):\n block = chain_to_send[i]\n block_index = str(block.index)\n block_timestamp = str(block.timestamp)\n block_data = str(block.data)\n block_hash = block.hash\n assembled = json.dumps({\n \"index\": block_index,\n \"timestamp\": block_timestamp,\n \"data\": block_data,\n \"hash\": block_hash\n })\n if blocklist == \"\":\n blocklist = assembled\n else:\n blocklist += assembled\n return blocklist\n\n chain_to_send = json.dumps(chain_to_send)\n return chain_to_send", "def get_blocks(self) -> list:\n self.clingo = ClingoBridge() # reset clingo\n\n base = ('base', '')\n self.clingo.add_file('initial-states.lp')\n self.clingo.run([base], n=1)\n output = self.clingo.output[0]\n\n blocks = []\n for atom in output:\n if atom.name == 'block':\n blocks.append(atom)\n\n return blocks", "def generate_blocks(block_file):\n # inspired by https://gist.github.com/anonymous/2204527\n code_points_ranges = []\n blocks = []\n\n match = re.compile(r'([0-9A-F]+)\\.\\.([0-9A-F]+);\\ (\\S.*\\S)', re.UNICODE)\n\n with open(block_file,\"rb\") as f:\n for line in f:\n line = Encoder.str2uni(line)\n p = re.findall(match, 
line)\n if p:\n code_point_range_from, code_point_range_to, block_name = p[0]\n if block_name == 'No_Block':\n continue\n block_name = block_name.upper()\n if block_name not in blocks:\n blocks.append(block_name)\n code_points_ranges.append((\n int(code_point_range_from, 16),\n int(code_point_range_to, 16),\n blocks.index(block_name)))\n code_points_ranges.sort()\n\n blocks_data = {\n 'blocks': blocks,\n 'code_points_ranges': code_points_ranges,\n }\n Jfile.dump(_lexicons.format('blocks.json'), blocks_data)", "def _load_saved_ledger(self):\n\n with open(self.ledger_file, 'r') as ledger:\n self.log.debug('Loading blocks from local ledger!')\n i = 0\n for block_str in ledger:\n i += 1\n if self._add_block_str(block_str.strip(), False):\n self.log.info(\"Loaded block %d\", i)\n\n # After loading all blocks from file, tell our miner to continue\n self.last_update = self.latest_time\n self.mining_flag = CONTINUE_MINING", "def get_blocks(fname):\n with open(fname, 'r') as fh:\n lines = iter(fh.readlines())\n parts = []\n line = next(lines)\n while True:\n if line.startswith('diff --git'):\n block = [line]\n for line in lines:\n if line.startswith('@@'):\n break\n block.append(line)\n parts.append(block)\n if line.startswith('@@'):\n block = [line]\n for line in lines:\n if line.startswith('@@') or line.startswith('diff --git'):\n break\n block.append(line)\n parts.append(block)\n if line.startswith('\\\\ No newline'):\n parts[-1].append(line)\n try:\n line = next(lines)\n except StopIteration:\n break\n if not lines:\n break\n return parts", "def parse_blocks(fblocks):\n print('Parse blocks: ', end='')\n result = []\n\n for line in fblocks:\n stripped = line.strip()\n if len(stripped) > 0 and stripped[0] != '#':\n match = re.match(r\"([0-9A-F]+)\\.{2}([0-9A-F]+);\\s+(.+)\", stripped)\n result.append({\n 'begin': int(match.group(1), 16),\n 'end': int(match.group(2), 16),\n 'name': match.group(3)\n })\n\n print('done')\n return result", "def _prepare_blocks():\n\n counter = blocks[0]['freeStart']\n maxBlocks = blocks[0]['maxBlocks']\n while(counter < maxBlocks) :\n try:\n # print (mount['parent'] + '/linddata.' + str(counter))\n f = open(mount['parent'] + '/linddata.' 
+ str(counter), 'r') \n except IOError, e:\n return STATUS['M_BD']\n else :\n fdatastring = f.next()\n fdata = deserializedata(fdatastring)\n blocks[counter] = fdata\n counter += 1\n \n return STATUS['OK']", "def readTestFile(self, filename):\n size = 0\n agentNum = 0\n block = {}\n agentList = []\n f = open(filename, 'r')\n for line in f:\n if line[0] != '#':\n c = line.split(' ')\n if c[0] == 'grid':\n size = int(line[5:7])\n elif c[0] =='block':\n block[(int(c[2]), int(c[1]))] = (int(c[3]) - int(c[1]) + 1, int(c[4]) - int(c[2]) + 1)\n elif c[0] == 'nets':\n agentNum = int(c[1])\n elif c[0] == 'net' or c[0] == 'xet':\n print(c)\n agentList.append([int(c[1]), (int(c[3]), int(c[2])), (int(c[6]), int(c[5]))])\n f.close()\n print(size)\n print(block)\n print(agentNum)\n print(agentList)\n return size, block, agentNum, agentList", "def __init__(self, transactions=None):\n\n self.blocks = []\n if transactions:\n if type(transactions) is not list:\n raise Exception(\"Data must be a list of transactions!\")\n\n for i, tx in enumerate(transactions):\n if i == 0: # Create genesis block\n if not signature.verify(tx.from_pk, tx.to_string_for_hashing(), tx.signature):\n print(\"Genesis transaction signature is NOT valid.\")\n return\n prev_hash = \"0\" # Arbitrary prev_hash for genesis block\n new_block = Block.create_from_transaction(tx, prev_hash)\n self.blocks.append(new_block)\n else:\n if not self.validate_transaction(tx):\n print(\"Transaction is NOT valid.\")\n return\n new_block = Block.create_from_transaction(tx, self.blocks[-1].header_hash)\n self.validate_and_add_block(new_block)", "def ReadData( fName = '/tmp/chartdata' ):\n blocks = common.ReadDataFromFile( fName )\n\n return blocks", "def parse_file(filepath):\n with fitz.open(filepath) as doc:\n block_dict = {(idx + 1): page.getText(\"blocks\") for idx, page in enumerate(doc)}\n block_dict = {\n key: [block[4] for block in value] for key, value in block_dict.items()\n }\n return block_dict", "def GetBlocks(state):\n result = []\n last_pos = 0\n for entry in state:\n pos = entry['pos']\n # Calculate block start points from the beginning of individual lines.\n blocks = [(s[0]-last_pos, s[1]-s[0]) for s in entry['blocks']]\n # Add one end marker block.\n blocks.append((pos-last_pos, 0))\n result.append(blocks)\n last_pos = pos\n return result", "def ejecutar_cargar_blockchain_cupicoin() -> list:\n bloques = None\n archivo = input(\"Por favor ingrese el nombre del archivo CSV con las transacciones: \")\n bloques = cc.cargar_blockchain_cupicoin(archivo)\n if len(bloques) == 0:\n print(\"El archivo seleccionado no es valido. 
No se pudieron cargar los bloques.\")\n else:\n print(\"Se cargaron los siguientes bloques con su correspondiente hash a partir del archivo.\")\n for transaccion in bloques:\n print(transaccion[\"hash\"])\n return bloques", "def load(self, file_name):\n self.file_name = file_name\n\n with open(file_name, 'rb') as in_file:\n eof = (in_file.read(1) == b'')\n\n while not eof:\n key = int(in_file.read(4))\n code = in_file.read(1).decode()\n block = None\n if key == 1:\n block = FRDHeader(in_file, code)\n self.headers.append(block)\n elif key == 2:\n block = FRDNodeBlock(in_file)\n self.node_block = block\n elif key == 3:\n block = FRDElemBlock(in_file)\n self.elem_block = block\n elif key == 100:\n block = FRDResultBlock(in_file)\n self.result_blocks.append(block)\n elif key == 9999:\n eof = True\n if block is not None:\n self.blocks.append(block)\n eof = (eof or (in_file.read(1) == b''))", "def read_change(filename):\n \n all_coins = []\n for line in open(filename):\n line = line.strip(\"\\n\")\n coin_info = line.split(\",\")\n coin_type = int(coin_info[0].strip())\n coin_count = int(coin_info[1])\n for i in range(coin_count):\n all_coins.append(coin_type)\n return all_coins", "def read_blocks(extdir):\n\n pids = get_pids(extdir)\n blocks = get_blocks(extdir)\n\n services = sorted(pids)\n ret = []\n for service in services:\n if service not in blocks:\n continue\n\n blks = blocks[service]\n for i, blk in enumerate(blks):\n name = service\n instance = \"{}-{}\".format(service, i)\n\n blk.setdefault(\"name\", name)\n blk.setdefault(\"instance\", instance)\n blk.setdefault(\"full_text\", \"running\")\n ret.append(blk)\n\n return ret", "def read_file(file_path):\n\n payload_list = list()\n\n if os.path.isfile(file_path):\n print(\"Loading File in: \" + file_path)\n\n with open(file_path, 'rb') as f:\n count = 0\n while True:\n chunk = f.read(25) \n if chunk:\n if count == 100:\n count = 0\n num = num_generator(count)\n crc = calculate_crc(chunk+bytes(num.encode('utf-8')))\n count = count + 1\n payload_list.append(chunk+bytes(num.encode('utf-8'))+bytes(crc.encode('utf-8')))\n else:\n break\n else:\n print(\"ERROR: file does not exist in PATH: \" + file_path)\n \n return payload_list", "def make_blocks_from_blockhashes(blockhashes):\n blocks = []\n\n for (height, blockhash) in enumerate(blockhashes):\n block = {\"hash\": blockhash, \"height\": height, \"tx\": []}\n if height != 0:\n block[\"previousblockhash\"] = previousblockhash\n blocks.append(block)\n previousblockhash = blockhash\n\n return blocks", "def read(self, block_no):\n with open(self.file_path, 'r+') as f:\n f.seek(block_no * config.block_size)\n return f.read(config.block_size)", "def get_blocks(extdir):\n\n block_fnames = glob.glob(extdir + \"/*.block\")\n\n blocks = {}\n for fname in block_fnames:\n try:\n # Get the json state\n with open(fname) as fobj:\n block = json.load(fobj)\n except (OSError, IOError, ValueError):\n continue\n\n if isinstance(block, list):\n pass\n elif isinstance(block, dict):\n block = [block]\n else:\n continue\n\n service = os.path.basename(fname)\n service = service.split(\".\")[0]\n blocks[service] = block\n\n return blocks", "def read_block(chunk):\n\n\t# Chunk number and data\n\tchunk_id = chunk[0]\n\tdata = chunk[1]\n\n\t# For the implicit tape data chunk, just read the block as a series\n\t# of bytes, as before\n\tif chunk_id == 0x100:\n\n\t\tblock = data\n\n\telse:\t# 0x102\n\n\t\tif UEF_major == 0 and UEF_minor < 9:\n\n\t\t\t# For UEF file versions earlier than 0.9, the number of\n\t\t\t# excess 
bits to be ignored at the end of the stream is\n\t\t\t# set to zero implicitly\n\t\t\tignore = 0\n\t\t\tbit_ptr = 0\n\t\telse:\n\t\t\t# For later versions, the number of excess bits is\n\t\t\t# specified in the first byte of the stream\n\t\t\tignore = data[0]\n\t\t\tbit_ptr = 8\n\n\t\t# Convert the data to the implicit format\n\t\tblock = []\n\t\twrite_ptr = 0\n\n\t\tafter_end = (len(data)*8) - ignore\n\t\tif after_end % 10 != 0:\n\n\t\t\t# Ensure that the number of bits to be read is a\n\t\t\t# multiple of ten\n\t\t\tafter_end = after_end - (after_end % 10)\n\n\t\twhile bit_ptr < after_end:\n\n\t\t\t# Skip start bit\n\t\t\tbit_ptr = bit_ptr + 1\n\n\t\t\t# Read eight bits of data\n\t\t\tbit_offset = bit_ptr % 8\n\t\t\tif bit_offset == 0:\n\t\t\t\t# Write the byte to the block\n\t\t\t\tblock[write_ptr] = data[bit_ptr >> 3]\n\t\t\telse:\n\t\t\t\t# Read the byte containing the first bits\n\t\t\t\tb1 = data[bit_ptr >> 3]\n\t\t\t\t# Read the byte containing the rest\n\t\t\t\tb2 = data[(bit_ptr >> 3) + 1]\n\n\t\t\t\t# Construct a byte of data\n\t\t\t\t# Shift the first byte right by the bit offset\n\t\t\t\t# in that byte\n\t\t\t\tb1 = b1 >> bit_offset\n\n\t\t\t\t# Shift the rest of the bits from the second\n\t\t\t\t# byte to the left and ensure that the result\n\t\t\t\t# fits in a byte\n\t\t\t\tb2 = (b2 << (8 - bit_offset)) & 0xff\n\n\t\t\t\t# OR the two bytes together and write it to\n\t\t\t\t# the block\n\t\t\t\tblock[write_ptr] = b1 | b2\n\n\t\t\t# Increment the block pointer\n\t\t\twrite_ptr = write_ptr + 1\n\n\t\t\t# Move the data pointer on eight bits and skip the\n\t\t\t# stop bit\n\t\t\tbit_ptr = bit_ptr + 9\n\n\t# Read the block\n\tname = ''\n\ta = 1\n\twhile True:\n\t\tc = block[a]\n\t\tif ord(c) != 0:\t\t# was > 32:\n\t\t\tname = name + c\n\t\ta = a + 1\n\t\tif ord(c) == 0:\n\t\t\tbreak\n\n\tload = str2num(4, block[a:a+4])\n\texec_addr = str2num(4, block[a+4:a+8])\n\tblock_number = str2num(2, block[a+8:a+10])\n\tlast = str2num(1, block[a+12])\n\n\tif last & 0x80 != 0:\n\t\tlast = 1\n\telse:\n\t\tlast = 0\n\n\treturn (name, load, exec_addr, block[a+19:-2], block_number, last)", "def parse_config(fpath):\n\n with open(fpath, \"r\") as f:\n # Ignore lines consisting only of whitespace or commented lines.\n lines = [\n line.strip() for line in f.readlines()\n if not (line.isspace() or line.startswith(\"#\"))\n ]\n\n # Each block begins with a line of the form \"[type]\", with the block type\n # (eg, \"convolutional\") enclosed in square brackets. 
Chunk config text\n # into blocks.\n block_start_lines = [\n line_num for line_num, line in enumerate(lines) if line.startswith(\"[\")\n ]\n block_start_lines.append(len(lines))\n\n text_blocks = []\n for i in range(1, len(block_start_lines)):\n block_start, block_end = block_start_lines[i-1], block_start_lines[i]\n text_blocks.append(lines[block_start:block_end])\n\n def str2type(raw_val):\n \"\"\"\n Helper function to convert a string input to the appropriate\n type (str, int, or float).\n \"\"\"\n try:\n return int(raw_val)\n except ValueError:\n pass\n\n try:\n return float(raw_val)\n except ValueError:\n return raw_val\n\n blocks = []\n net_info = None\n for text_block in text_blocks:\n block = {\"type\": text_block[0][1:-1]}\n for line in text_block[1:]:\n key, raw_val = line.split(\"=\")\n key = key.strip()\n\n # Convert fields with multiple comma-separated values into lists.\n if \",\" in raw_val:\n val = [str2type(item.strip()) for item in raw_val.split(\",\")]\n else:\n val = str2type(raw_val.strip())\n\n # If this is a \"route\" block, its \"layers\" field contains either\n # a single integer or several integers. If single integer, make it\n # a list for consistency.\n if (\n block[\"type\"] == \"route\"\n and key == \"layers\"\n and isinstance(val, int)\n ):\n val = [val]\n\n # If this is a \"yolo\" block, it contains an \"anchors\" field\n # consisting of (anchor width, anchor height) pairs of values;\n # thus, we group anchor values into chunks of two.\n if key == \"anchors\":\n val = [val[i:i+2] for i in range(0, len(val), 2)]\n\n block[key] = val\n\n if block[\"type\"] == \"net\":\n net_info = block\n else:\n blocks.append(block)\n\n return blocks, net_info", "def conservedBlocks():\n path = \"./data/\"\n for file in os.listdir(path):\n if file.endswith(\".fa\") or file.endswith(\".fasta\"):\n alignin = AlignIO.read(path + file, \"fasta\")\n try:\n filecore = file.rstrip(\".fa\")\n except:\n filecore = file.rstrip(\".fasta\")\n fileout = path + filecore + \".blocks\"\n \n # constants\n align = []\n cons = []\n border = []\n blocks = []\n \n # alignment\n for pos in range(0,(alignin.get_alignment_length())):\n column=alignin[:,pos]\n if \"-\" not in column:\n align.append(column)\n cons.append(pos)\n \n \n if cons != []: \n border.append(cons[0])\n border.append(cons[len(cons)-1])\n for i in range(0, len(cons)-1):\n if int(cons[i]+1)!=int(cons[i+1]):\n border.append(cons[i])\n \n for j in range((len(cons)-1), 0, -1):\n if int(cons[j]-1)!=int(cons[j-1]):\n border.append(cons[j]) \n \n # list of positions for the blocks\n order=sorted(border)\n \n # get the blocks and writes to the .blocks file\n o=open(fileout, \"w\")\n \n for i in range(0,len(order)-1,2):\n beg=int(order[i])\n end=int(order[i+1])\n count = end-beg \n block=alignin[:,beg:end]\n \n # specify the minimum length of a gap\n if count < 3:\n pass\n else: \n blocks.append(block) \n o.write('***Block***'+\"\\n\"+\"Start:\"+str(beg)+\\\n \"\\n\"+\"Count:\"+str(count)+\"\\n\")\n for record in block:\n o.write(str(record.seq)+\"\\n\")\n o.close()\n else:\n o=open(fileout, \"w\")\n o.close()\n pass\n return", "def bz2file_to_datapoints(filename: str, data_block_size) -> List[DataPoint]:\n datapoints = []\n bz_file = bz2.BZ2File(filename)\n\n line_number = 1\n\n for line in bz_file:\n row = line.decode(\"utf-8\").replace(\"\\r\\n\", \"\").split(\n \",\")\n if (len(row) == 5):\n sample = str([row[2], row[3], row[4]])\n else:\n sample = str([row[2]])\n\n start_time = datetime.datetime.fromtimestamp(int(row[0]) / 1000)\n 
localtz = timezone('US/Central')\n start_time = localtz.localize(start_time)\n end_time = \"\"\n\n if line_number > data_block_size:\n yield datapoints\n datapoints.clear()\n line_number = 1\n else:\n datapoints.append(DataPoint(start_time=start_time, end_time=end_time, sample=sample))\n line_number += 1\n\n yield datapoints" ]
[ "0.74427164", "0.68054676", "0.6126557", "0.60757756", "0.59914404", "0.5931667", "0.5885084", "0.5811858", "0.5760393", "0.57101476", "0.56943816", "0.5690348", "0.56846213", "0.5644611", "0.56390595", "0.56177294", "0.55774206", "0.5571329", "0.55698186", "0.55587953", "0.55469596", "0.5537467", "0.5503996", "0.54548573", "0.5450782", "0.54255927", "0.5416154", "0.5414923", "0.5412907", "0.5411086" ]
0.6991728
1
Convert a block in string to a block object
def stringToBlock(strBlock): blocksplit = strBlock.split(';') block = Blockchain(int(blocksplit[0]), blocksplit[1], int(blocksplit[2]), int(blocksplit[3]) ,blocksplit[4]) return block
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _decode_block_string(block_string):\n assert isinstance(block_string, str)\n\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n # Check stride\n assert (('s' in options and len(options['s']) == 1) or\n (len(options['s']) == 2 and options['s'][0] == options['s'][1]))\n\n return BlockArgs(\n kernel_size=int(options['k']),\n num_repeat=int(options['r']),\n input_filters=int(options['i']),\n output_filters=int(options['o']),\n expand_ratio=int(options['e']),\n id_skip=('noskip' not in block_string),\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=[int(options['s'][0])])", "def _decode_block_string(block_string):\n assert isinstance(block_string, str)\n\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n # Check stride\n assert (('s' in options and len(options['s']) == 1) or\n (len(options['s']) == 2 and options['s'][0] == options['s'][1]))\n\n return BlockArgs(\n kernel_size=int(options['k']),\n num_repeat=int(options['r']),\n input_filters=int(options['i']),\n output_filters=int(options['o']),\n expand_ratio=int(options['e']),\n id_skip=('noskip' not in block_string),\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=[int(options['s'][0])])", "def parse_block(\n string: str,\n vars: Dict,\n neg: bool = False,\n min_: int = 1,\n max_: int = 1,\n label: Optional[str] = None,\n start: int = -1,\n) -> Block:\n it: BlockIterator = BlockIterator(string, start=start)\n members: List[Union[Block, Unit, Ref]] = []\n block = Block(members, vars, neg=neg, min_=min_, max_=max_, label=label)\n for content, neg, min_, max_, label, type_, line_num in it:\n if type_ == Types.BLOCK:\n members.append(parse_block(content, vars, neg, min_, max_, label, line_num))\n elif type_ == Types.UNIT:\n members.append(parse_unit(content, neg, min_, max_, label))\n elif type_ == Types.VAR_REF:\n members.append(Ref(content, neg, min_, max_, label))\n # use this after the iteration is complete\n block.union = it.is_union\n if not block.members and not EMPTY_BLOCK_RE.match(string):\n raise ValueError(\n f\"Cannot parse block contents: {string} \" f\"starting at {start}\"\n )\n return block", "def update(blockstring):\n with BLOCK_LOCK:\n for block in blockstring.split('|'):\n name, values = parse(block)\n BLOCKS[name] = values", "def _decode_block_string(self, block_string: str):\n\n arg_strings = block_string.split('_')\n args = {}\n for arg_string in arg_strings:\n splits = re.split(r'(\\d.*)', arg_string)\n if len(splits) >= 2:\n key, value = splits[:2]\n args[key] = value\n num_repeat = int(args['r'])\n block_args = {\n 'kernel_size': int(args['k']),\n 'stride': int(args['s']),\n 'expand_ratio': int(args['e']),\n 'in_channels': int(args['i']),\n 'out_channels': int(args['o']),\n 'se_ratio': float(args['se']) if 'se' in args else None,\n }\n return block_args, num_repeat", "def from_string(block_string: str):\n ops = block_string.split(\"_\")\n options = {}\n for op in ops:\n splits = re.split(r\"(\\d.*)\", op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n # check stride\n stride_check = (\n (\"s\" in options and len(options[\"s\"]) == 1)\n or (len(options[\"s\"]) == 2 and options[\"s\"][0] == options[\"s\"][1])\n or (len(options[\"s\"]) == 3 and options[\"s\"][0] == options[\"s\"][1] and 
options[\"s\"][0] == options[\"s\"][2])\n )\n if not stride_check:\n raise ValueError(\"invalid stride option received\")\n\n return BlockArgs(\n num_repeat=int(options[\"r\"]),\n kernel_size=int(options[\"k\"]),\n stride=int(options[\"s\"][0]),\n expand_ratio=int(options[\"e\"]),\n input_filters=int(options[\"i\"]),\n output_filters=int(options[\"o\"]),\n id_skip=(\"noskip\" not in block_string),\n se_ratio=float(options[\"se\"]) if \"se\" in options else None,\n )", "def build_block(self, format_string):\n first_block = Block(None, py3_wrapper=self.py3_wrapper)\n block = first_block\n\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n value = token.group(0)\n if token.group(\"block_start\"):\n # Create new block\n block = block.new_block()\n elif token.group(\"block_end\"):\n # Close block setting any valid state as needed\n # and return to parent block to continue\n if not block.parent:\n raise Exception(\"Too many `]`\")\n block = block.parent\n elif token.group(\"switch\"):\n # a new option has been created\n block = block.switch()\n elif token.group(\"placeholder\"):\n # Found a {placeholder}\n key = token.group(\"key\")\n format = token.group(\"format\")\n block.add(Placeholder(key, format))\n elif token.group(\"literal\"):\n block.add(Literal(value))\n elif token.group(\"lost_brace\"):\n # due to how parsing happens we can get a lonesome }\n # eg in format_string '{{something}' this fixes that issue\n block.add(Literal(value))\n elif token.group(\"command\"):\n # a block command has been found\n block.set_commands(token.group(\"command\"))\n elif token.group(\"escaped\"):\n # escaped characters add unescaped values\n if value[0] in [\"\\\\\", \"{\", \"}\"]:\n value = value[1:]\n block.add(Literal(value))\n\n if block.parent:\n raise Exception(\"Block not closed\")\n # add to the cache\n self.block_cache[format_string] = first_block", "def _decode_block_str(block_str, depth_multiplier=1.0):\n assert isinstance(block_str, str)\n ops = block_str.split('_')\n block_type = ops[0] # take the block type off the front\n ops = ops[1:]\n options = {}\n noskip = False\n for op in ops:\n # string options being checked on individual basis, combine if they grow\n if op == 'noskip':\n noskip = True\n elif op.startswith('n'):\n # activation fn\n key = op[0]\n v = op[1:]\n if v == 're':\n value = F.relu\n elif v == 'r6':\n value = F.relu6\n elif v == 'hs':\n value = hard_swish\n elif v == 'sw':\n value = swish\n else:\n continue\n options[key] = value\n else:\n # all numeric options\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n # if act_fn is None, the model default (passed to model init) will be used\n act_fn = options['n'] if 'n' in options else None\n exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1\n pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1\n fake_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def\n\n num_repeat = int(options['r'])\n # each type of block has different valid arguments, fill accordingly\n if block_type == 'ir':\n block_args = dict(\n block_type=block_type,\n dw_kernel_size=_parse_ksize(options['k']),\n exp_kernel_size=exp_kernel_size,\n pw_kernel_size=pw_kernel_size,\n out_chs=int(options['c']),\n exp_ratio=float(options['e']),\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=int(options['s']),\n act_fn=act_fn,\n noskip=noskip,\n )\n elif block_type == 
'ds' or block_type == 'dsa':\n block_args = dict(\n block_type=block_type,\n dw_kernel_size=_parse_ksize(options['k']),\n pw_kernel_size=pw_kernel_size,\n out_chs=int(options['c']),\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=int(options['s']),\n act_fn=act_fn,\n pw_act=block_type == 'dsa',\n noskip=block_type == 'dsa' or noskip,\n )\n elif block_type == 'er':\n block_args = dict(\n block_type=block_type,\n exp_kernel_size=_parse_ksize(options['k']),\n pw_kernel_size=pw_kernel_size,\n out_chs=int(options['c']),\n exp_ratio=float(options['e']),\n fake_in_chs=fake_in_chs,\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=int(options['s']),\n act_fn=act_fn,\n noskip=noskip,\n )\n elif block_type == 'cn':\n block_args = dict(\n block_type=block_type,\n kernel_size=int(options['k']),\n out_chs=int(options['c']),\n stride=int(options['s']),\n act_fn=act_fn,\n )\n else:\n assert False, 'Unknown block type (%s)' % block_type\n\n return block_args, num_repeat", "def decode_block_string(self, block_string):\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n if 's' not in options or len(options['s']) != 2:\n raise ValueError('Strides options should be a pair of integers.')\n\n self.input_filters = int(options['i'])\n self.output_filters = int(options['o'])\n self.dw_kernel_size = self._parse_ksize(options['k'])\n self.expand_kernel_size = self._parse_ksize(options['a'])\n self.project_kernel_size = self._parse_ksize(options['p'])\n self.num_repeat = int(options['r'])\n self.identity_skip = ('noskip' not in block_string)\n self.se_ratio = float(options['se']) if 'se' in options else None\n self.expand_ratio = int(options['e'])\n self.strides = [int(options['s'][0]), int(options['s'][1])]\n self.swish = 'sw' in block_string\n self.dilated = 'dilated' in block_string\n\n return self", "def Block_from_full_name(full_name):\n delim = full_name.find('#')\n if delim == -1:\n raise ObjectError('Invalid block name %s' % full_name)\n\n return full_name[:delim], Block_to_internal_name(full_name[delim + 1:])", "def parse(message):\n if not isinstance(message, str):\n raise TypeError(\"Block.from_network_format: expected message to be of type str\")\n if not message[0] == 'd':\n raise ValueError()\n block_number = int(message[1:7], 16)\n timestamp = int(message[7:15], 16)\n difficulty = int(message[15:17], 16)\n nonce = int(message[17:81], 16)\n previous_block_hash = message[81:145]\n merkle_root_hash = message[145:209]\n transaction_count = int(message[209:211], 16)\n message = message[211:]\n block_transactions = []\n for x in range(transaction_count):\n transaction_length = int(message[:5], 16)\n transaction = message[5:transaction_length + 5]\n block_transactions.append(transaction)\n message = message[transaction_length + 5:]\n str_block_transactions = \"\"\n for t in block_transactions:\n str_block_transactions += t + \",\"\n str_block_transactions = str_block_transactions[:-1]\n self_hash = calculate_hash(previous_block_hash, merkle_root_hash, nonce)\n block = (\n 0, block_number, timestamp, difficulty, nonce, previous_block_hash, merkle_root_hash,\n str_block_transactions,\n self_hash)\n return Block(block)", "def parseBlock(self, text, prevLineData):\n return self.parser.parseBlock(text, prevLineData)", "def _from_string(cls, serialized):\r\n course_key = 
CourseLocator._from_string(serialized)\r\n parsed_parts = cls.parse_url(serialized)\r\n block_id = parsed_parts.get('block_id', None)\r\n if block_id is None:\r\n raise InvalidKeyError(cls, serialized)\r\n return cls(course_key, parsed_parts.get('block_type'), block_id)", "def from_json(cls, json_str):\n obj = json.loads(json_str)\n fields = [\"header\", \"transactions\"]\n if not all(elem in obj.keys() for elem in fields):\n raise Exception(\"Block JSON string is invalid.\")\n header_fields = [\"prev_hash\", \"root\", \"timestamp\", \"nonce\"]\n if not all(elem in obj[\"header\"].keys() for elem in header_fields):\n raise Exception(\"Block JSON header is invalid.\")\n block = cls(obj[\"header\"], obj[\"transactions\"])\n if not block.validate():\n return None\n return block", "def makeBlock(tag):\n return {\"t\":\"RawBlock\",\"c\":[\"html\",tag]}", "def create_block_from_xml(xml_data, system, id_generator):\r\n node = etree.fromstring(xml_data)\r\n raw_class = system.load_block_type(node.tag)\r\n xblock_class = system.mixologist.mix(raw_class)\r\n\r\n # leave next line commented out - useful for low-level debugging\r\n # log.debug('[create_block_from_xml] tag=%s, class=%s' % (node.tag, xblock_class))\r\n\r\n block_type = node.tag\r\n url_name = node.get('url_name')\r\n def_id = id_generator.create_definition(block_type, url_name)\r\n usage_id = id_generator.create_usage(def_id)\r\n\r\n scope_ids = ScopeIds(None, block_type, def_id, usage_id)\r\n xblock = xblock_class.parse_xml(node, system, scope_ids, id_generator)\r\n\r\n _convert_reference_fields_to_keys(xblock)\r\n\r\n return xblock", "def transform_block(block):\n return {\n 'type': 'block',\n 'children': [transform_child(child) for child in block]\n }", "def from_bytes(b):\n bh, b = BlockHeader.from_bytes(b)\n num_txns, b = unpack_compact_int(b)\n txns = []\n for i in range(num_txns):\n t, b = Transaction.from_bytes(b)\n txns.append(t)\n\n return Block.from_blockheader(bh, txns), b", "def str_to_block(s, block_size):\n n = len(s)/block_size\n res = []\n for i in range(n):\n sub_s = s[i*block_size:i*block_size+block_size]\n res.append(sub_s)\n return res", "def blockParser(block):\n struct = []\n first = True\n record = False\n for line in block:\n if line.startswith('Structure #'):\n record = True\n if not first:\n yield struct\n struct = []\n first = False\n if record:\n struct.append(line)\n yield struct", "def parse_blocks(fblocks):\n print('Parse blocks: ', end='')\n result = []\n\n for line in fblocks:\n stripped = line.strip()\n if len(stripped) > 0 and stripped[0] != '#':\n match = re.match(r\"([0-9A-F]+)\\.{2}([0-9A-F]+);\\s+(.+)\", stripped)\n result.append({\n 'begin': int(match.group(1), 16),\n 'end': int(match.group(2), 16),\n 'name': match.group(3)\n })\n\n print('done')\n return result", "def __ParseBlock(self, ast):\n for node in ast:\n node_name = node[0]\n node_value = node[1]\n if node_name == 'statement':\n self.__ParseStatement(node_value)\n else:\n logging.info('Unknown AST node in message block: %s' % (node_name))", "def from_etree(elem):\n assert elem.tag == \"block\", elem.tag\n\n # Create the block with basic attributes\n block = Block(name=elem.attrib[\"name\"], instance=elem.attrib[\"instance\"], mode=elem.get(\"mode\", \"default\"))\n\n # Parse ports\n rotation_maps = {}\n for tag in [\"inputs\", \"outputs\", \"clocks\"]:\n port_type = tag[:-1]\n\n xml_ports = elem.find(tag)\n if xml_ports is not None:\n for xml_port in xml_ports:\n # Got a port rotation map\n if xml_port.tag == \"port_rotation_map\":\n 
port_name = xml_port.attrib[\"name\"]\n rotation = xml_port.text\n\n # Parse the map\n rotation_map = {}\n for i, j in enumerate(rotation.strip().split()):\n if j != \"open\":\n rotation_map[i] = int(j)\n\n # Store it to be later associated with a port\n rotation_maps[port_name] = rotation_map\n\n # Got a port\n else:\n port = Port.from_etree(xml_port, port_type)\n block.ports[port.name] = port\n\n # Associate rotation maps with ports\n for port_name, rotation_map in rotation_maps.items():\n assert port_name in block.ports, port_name\n block.ports[port_name].rotation_map = rotation_map\n\n # Recursively parse sub-blocks\n for xml_block in elem.findall(\"block\"):\n sub_block = Block.from_etree(xml_block)\n\n sub_block.parent = block\n block.blocks[sub_block.instance] = sub_block\n\n # Parse attributes and parameters\n for tag, data in zip([\"attributes\", \"parameters\"], [block.attributes, block.parameters]):\n # Find the list\n xml_list = elem.find(tag)\n if xml_list is not None:\n # Only a leaf block can have attributes / parameters\n assert block.is_leaf, \"Non-leaf block '{}' with {}\".format(block.instance, tag)\n\n # Parse\n sub_tag = tag[:-1]\n for xml_item in xml_list.findall(sub_tag):\n data[xml_item.attrib[\"name\"]] = xml_item.text\n\n return block", "def handleBlock(self, block_str):\n\n new_block = Block()\n new_block.parseJson(block_str)\n if not self.checkBlock(new_block):\n return False\n\n if new_block.confirmed:# if confirmed by someone, check and add block\n if valid_proof_of_work(new_block):\n print(\"Someone done before me, I'm going to stop\")\n self.threadjob = False\n self.BlockChain.addBlock(new_block) \n else:\n raise Exception(\"Hey this Block's Hash is not valid\")\n return False\n\n elif self.miner_indicator:\n if self.threadjob:\n print(\"This is miner, I should mine, but I'm already doing \")\n return True\n print(\"Hey this is miner, I'm going to mine\")\n self.threadjob = True\n t = Thread(target=self.mine, args = (new_block,))\n t.start()\n else:\n print(\"Hey this is unconfirmed block , but I am not miner, so I gonna miss it\")\n return True", "def parser(block):\n def unwrap(result):\n tupe, value = result\n if tupe & ParserResult.DONE:\n value, chunk, last = value\n return (ParserResult.from_done(value.value, chunk, last) if value.error is None else\n ParserResult.from_error(value.error))\n elif tupe & ParserResult.PARTIAL:\n return ParserResult.from_partial(Parser(lambda chunk, last: unwrap(value(chunk, last))))\n else:\n return result\n do_block = do(Parser)(block)\n return F.wraps(block)(\n lambda *args, **kwargs: Parser(\n lambda chunk, last: unwrap(do_block(*args, **kwargs)(chunk, last))))", "def add_block_str(self, block_str):\n return self._add_block_str(block_str, True, False)", "def parse_graph(self, graph_str, use_bs=True):\n blocks = graph_str.strip().split('BasicBlock ')\n \n # 1st iteration: collect block labels and node's bvars\n for block in blocks:\n if len(block) == 0: # skip empty lines\n continue\n\n block_var, instructions = block.split(':', 1)\n\n if instructions.find('<label>:') != -1:\n #raise Exception(\"Not supported\") # NOTE: legacy error, need to check when it happens though\n label = instructions.split('<label>:')[1].split(' ')[0]\n else:\n label = instructions.split('\\n', 1)[1].split(':')[0]\n\n if \"bd_\" in block_var: # only bd_ blocks can be start/end nodes\n # 'b' stands for block, 'd' for dominant\n if self._start_var is not None:\n self._end_var = block_var\n else:\n self._start_var = block_var\n\n 
self._label2var[label] = block_var\n self._var2label[block_var] = label\n\n # 2nd iteration: collect nodes and edges\n for block in blocks:\n if len(block) == 0: # skip empty lines\n continue\n\n # collect node\n block_var, instructions = block.split(':', 1)\n block_label = self._var2label[block_var]\n block_cost = int(instructions.split(' ')[1])\n block_dominator = instructions.split('Dominator = ', 1)[1].split('\\n', 1)[0]\n if block_dominator == 'NULL':\n block_dominator = \"-1\"\n else:\n block_dominator = block_dominator.split('_')[1]\n\n if block_var == self._start_var and use_bs:\n block_var = block_var.replace(\"bd_\", \"bs_\")\n\n b = Node(block_var, block_label, block_cost, block_dominator, self)\n\n self._nodes[b.get_uid()] = b\n self._label2uid[block_label] = b.get_uid()\n\n # collect edges\n if instructions.find('br ') != -1:\n for succ in instructions.split('br ')[1].strip().split('label'):\n succ = succ.strip()\n if len(succ) >= 2 and succ[0] == '%':\n dst_label = succ[1:] if succ[-1] != ',' else succ[1:-1]\n dst_var = self._label2var[dst_label]\n e = Edge(block_var, dst_var, 0, self)\n self._edges[e.get_uid()] = e\n\n if self._end_var is None:\n # single-block graph\n assert(len(self._nodes.keys()) <= 1)\n assert(len(self._edges.keys()) <= 0)\n self._end_var = self._start_var\n\n # update start/end uids\n self._start_uid = self._label2uid[self._var2label[self._start_var]]\n self._end_uid = self._label2uid[self._var2label[self._end_var]]\n\n # update nodes with ingoing/outgoing edges\n for edge_uid in self._edges.keys():\n edge = self._edges[edge_uid]\n src_block_uid = edge.get_src_uid()\n dst_block_uid = edge.get_dst_uid()\n self._nodes[src_block_uid].add_successor(dst_block_uid)\n self._nodes[dst_block_uid].add_predecessor(src_block_uid)\n return", "def str_to_id(string, block):\n if len(string) % block != 0:\n raise Exception('String length not a multiple of block={}'.format(block))\n num_blocks = len(string) // block\n return tuple([int(string[i*block: (i+1)*block]) for i in range(num_blocks)])", "def get_block_from_node(node):\n coords = node.name.split(\",\") # name format is '(row,col)'\n col = int((coords[1])[:-1])\n row = int((coords[0])[1:])\n return Block(row, col)", "def decode(string_list):\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\n return blocks_args" ]
[ "0.70905423", "0.70905423", "0.70402545", "0.68169737", "0.6797657", "0.6712426", "0.6638009", "0.65721875", "0.65279007", "0.64944434", "0.645922", "0.6392348", "0.63896126", "0.62067014", "0.61407346", "0.60342276", "0.59786546", "0.59596866", "0.59488565", "0.58848554", "0.5833358", "0.5826576", "0.5825889", "0.5819694", "0.5782451", "0.57756025", "0.575015", "0.57232964", "0.56825036", "0.56668305" ]
0.81313765
0
Create a block for a transaction
def newBlock(preBlock, remitter, number, payee): index = preBlock.index + 1 timestamp = int(round(time.time() * 1000)) data = (remitter, number, payee).__str__() previousHash = preBlock.hash nounce = 0 return Blockchain(index, data, timestamp, nounce, previousHash)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_block(self):\n return poet_transaction_block.PoetTransactionBlock()", "def gen_new_block(self):\n block = BasicBlock()\n self.blocks.append(block)\n return block", "def NewBlock(self):\n for i in self.matrix:\n if 2 in i:\n return()\n blockType = self.bag.Choose()\n subtractor = {\"I\" : 4, \"J\" : 3, \"L\" : 3, \"O\" : 2, \"S\" : 3, \"T\" : 3, \"Z\": 3}\n x = random.randint(0, self.width - subtractor.get(blockType))\n coords = []\n if blockType == \"I\":\n coords = [(x + i, 0) for i in range(4)]\n elif blockType == \"J\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x, 1))\n elif blockType == \"L\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x + 2, 1))\n elif blockType == \"O\":\n coords = [(x, 0), (x + 1, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"Z\":\n coords = [(x, 0), (x + 1, 0), (x + 1, 1), (x + 2, 1)]\n elif blockType == \"S\":\n coords = [(x + 1, 0), (x + 2, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"T\":\n coords = [(x, 0), (x + 1, 0), (x + 2, 0), (x + 1, 1)]\n self.coords = coords\n return(coords)", "def create_block(self, complete_hash, nonce):\n print(\"Creating block with hash: '%s'\" % complete_hash)\n block = Block(complete_hash, nonce)\n for transaction in self.transactions:\n block.add_transaction(transaction)\n return block", "def createBlock(self, block: ghidra.program.model.mem.MemoryBlock, name: unicode, start: ghidra.program.model.address.Address, length: long) -> ghidra.program.model.mem.MemoryBlock:\n ...", "def create_block(self, nonce, previous_hash) -> None:\n block = {\n 'block_number': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.transactions,\n 'nonce': nonce,\n 'previous_hash': previous_hash\n }\n\n self.transactions = []\n self.chain.append(block)", "def new_block(self, proof, previous_hash = None):\n #create a new Block & adds it to the chain.\n \n block = {\n 'index' : len(self.chain) + 1,\n 'timestamp' : time(),\n 'transactions' : self.pending_transactions,\n 'proof' : proof,\n 'previous_hash' : previous_hash or self.hash(self.chain[-1])\n }\n\n # Reset the current list of transactions\n self.pending_transactions = []\n\n self.chain.append(block)\n return block\n #pass", "def create_from_transaction(tx, prev_hash):\n\n tx_hash = HashAssist.hash_value(tx.to_string_for_hashing())\n\n print(\"Mining nonce....\")\n nonce = proof.mint(prev_hash + tx_hash, WORK_FACTOR)\n header_hash = HashAssist.hash_value(prev_hash + tx_hash + nonce)\n\n return Block(header_hash, prev_hash, nonce, tx_hash, tx)", "def create_block(self, nonce, previous_hash):\n block = {'block_number': transaction_blocks.count() + 1,\n 'timestamp': ctime(t),\n 'transactions': self.transactions,\n 'nonce': nonce,\n 'previous_hash': previous_hash}\n\n # Reset the current list of transactions\n self.transactions = []\n self.chain.append(block)\n return block", "def createFirstBlock(self):\n firstBlock = Block(0, self.__currentTransactionsList, 0, '00')\n self.__chain.append(firstBlock)", "def create_genesis_block(self):\n index = 0\n transactions = []\n timestamp = 0.0\n previous_hash = \"0\"*64\n block = Block(index=index, transactions=transactions, timestamp=timestamp,previous_hash=previous_hash)\n block.hash = block.compute_hash()\n self.chain.append(block)", "def _make_block(self, model):\n # TODO Make base class\n assert model is not None, 'Top level model must be initialized first'\n self.model = model\n # If block is already present, remove it\n if self.model.component(self.name) is not None:\n 
self.model.del_component(self.name)\n self.model.add_component(self.name, Block())\n self.block = self.model.__getattribute__(self.name)\n\n self.logger.info(\n 'Optimization block initialized for {}'.format(self.name))", "def create_block(self, proof, previous_hash=None):\n\n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash,\n }\n\n # Reset current list of transactions\n self.current_transactions = []\n\n self.chain.append(block)\n return block", "def createNewBlock(self, nonce, previousBlockHash, hash):\n newBlock = Block(len(self.chain), self.pendingTransactions, nonce, hash, previousBlockHash)\n self.pendingTransactions = []\n self.chain.append(newBlock)\n return newBlock", "def add_block(self, env):\n block_size = (0.04, 0.04, 0.04)\n block_pose = self.random_pose(env, block_size)\n block_urdf = 'assets/stacking/block.urdf'\n block_id = env.add_object(block_urdf, block_pose)\n self.object_points[block_id] = np.float32((0, 0, 0)).reshape(3, 1)\n self._IDs[block_id] = 'block'\n return block_id", "def create_block(self, x, y, block_type):\n sprite_stack = self.get_sprite(x, y)\n if sprite_stack:\n sprite = sprite_stack[-1]\n sprite.image = block_type\n return\n\n # no existing block, so create a new one\n block_x = x * self.block_x + self.offset_x + self.menu_x\n block_y = y * self.block_y + self.offset_y\n\n bar = Sprite(\"\", image_data=block_type, x=block_x, y=block_y)\n if (x, y) in self.sprites:\n self.sprites[(x, y)].append(bar)\n else:\n self.sprites[(x, y)] = [bar]", "def new_block(self, proof, previous_hash=None):\r\n block = {\r\n 'index': len(self.chain) + 1,\r\n 'timestamp': time(),\r\n 'transactions': self.current_transactions,\r\n 'proof': proof,\r\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\r\n }\r\n\r\n # reseta a atual lista de transacoes\r\n self.current_transactions = []\r\n\r\n self.chain.append(block)\r\n return block", "def create_block(self, previous_hash):\r\n if len(self.transaction_pool) < 1:\r\n return None, None\r\n\r\n # Create A Temporary Block\r\n block = {'index': None, # before mining set index to None\r\n 'timestamp': None, # before mining set timestamp to None\r\n 'nonce': 0, # before mining set nonce to 0\r\n 'transactions': self.transaction_pool, # Fill in all the transactions\r\n 'previous_hash': previous_hash, # Set the previous hash\r\n 'current_hash': ''} # Current hash is yet to be calculated\r\n\r\n # Empty Transaction Pool\r\n self.transaction_pool = [] # Once transactions have been placed in a block\r\n # they can be removed from the pool\r\n\r\n # Calculate Proof Of Work (Nonce)\r\n block['nonce'], block['current_hash'] = self.proof_of_work(block, previous_hash) # Validate the block by calculating the nonce\r\n block['index'] = len(self.chain) + 1 # Set the block index\r\n block['timestamp'] = str(datetime.datetime.now()) # Set the timestamp to the time when the block was validated\r\n\r\n # Add Block To DistrictNode's Own Chain\r\n self.chain.append(block) # Append the block to the list of blocks in the blockchain\r\n print(\"BLOCK ADDED TO 90\")\r\n for block in self.chain:\r\n for key, value in block.items():\r\n print(key, value)\r\n print('\\n')\r\n\r\n return self.chain, self.transaction_pool # Return the new chain and the new transaction_pool\r", "def create_block_message(self, block):\n msg = poet_transaction_block.PoetTransactionBlockMessage()\n msg.TransactionBlock = block\n return msg", "def 
CreateBlock(self, props):\n # Set some default properties\n\n # blk / cfg | name | name+inst\n # -----------------------------\n # name | n==n | False\n # name+inst | n==n | n==n&&i==i\n if 'ordinal' not in props and 'name' in props:\n for ordinal, ni in self.config.get('ordinals', {}).items():\n if isinstance(ni, str):\n # Config only has name, don't care about block\n if props['name'] == ni:\n props['ordinal'] = ordinal\n break\n elif 'instance' not in props:\n # Config has name+instance, block only has name\n continue\n else:\n # Config has name+instance, block has name+instance\n if props['name'] == ni[0] and props['instance'] == ni[1]:\n props['ordinal'] = ordinal\n break\n\n # Compute a block ID\n if 'name' in props:\n bid = props['name'].replace('-', '')\n else:\n bid = \"block\"\n\n if bid in self.blocks:\n i = 1\n while bid+str(i) in self.blocks:\n i += 1\n bid = bid+str(i)\n\n # Actually create block\n self.blocks[bid] = blk = Block(bid, props)\n blk.changed.handler(lambda: self.blockchanged(blk))\n\n self.blockadded(blk)\n\n return blk", "def create_block(world: World, block_id: str, x: int, y: int, *args):\n block_id = BLOCKS[block_id]\n if block_id == \"mystery_empty\":\n block = MysteryBlock()\n elif block_id == \"mystery_coin\":\n block = MysteryBlock(drop=\"coin\", drop_range=(3, 6))\n elif block_id == \"bounce_block\":\n block = BounceBlock()\n elif block_id == \"flag\":\n block = Flagpole()\n elif block_id == \"tunnel\":\n block = Tunnel()\n elif block_id == \"switch\":\n block = Switch()\n else:\n block = Block(block_id)\n\n world.add_block(block, x * BLOCK_SIZE, y * BLOCK_SIZE)", "def newblock(self, parent=None):\n block = ControlBlock()\n self.blocks.add(block)\n if parent:\n parent.add_child(block)\n return block", "def __init__(self, _sequence, _structure_offset,\n _invert_y, _invert_init_angle, _reverse_actuation,\n _bot_color, _top_color,\n _name,\n _r1, _r2,\n _theta1, _theta2,\n _leg_length):\n\n # Define sequence\n self.sequence = _sequence\n self.structure_offset = _structure_offset\n self.invert_y = _invert_y\n self.bot_color = _bot_color\n self.top_color = _top_color\n self.name = _name\n self.invert_init_angle = _invert_init_angle\n if _reverse_actuation:\n self.invert_init_angle = not self.invert_init_angle\n\n # Create first block\n _d_bot = np.arccos(_theta1) * _r1 / 2\n _d_top = np.arccos(_theta2) * _r2 / 2\n _d_mid = 1 / 100\n _w = 5.5 / 100\n _h = 5.5 / 100\n\n _center = Coordinate(x=0, y=_d_bot - (_h / 2))\n self.block_bot = Block(\n _width=_w,\n _height=_h,\n _center=_center,\n _anchor_d=_d_bot,\n _color=self.bot_color,\n _type='bottom'\n )\n\n # Create mid block\n _center = Coordinate(x=0, y=_r1 - _d_mid + (_h / 2))\n self.block_mid = Block(\n _width=_w,\n _height=_h,\n _center=_center,\n _anchor_d=_d_mid,\n _color=Utils.black,\n _type='middle'\n )\n\n # Create top block\n _center = Coordinate(x=0, y=self.block_mid.get_anchor(type=\"t\").y + _r2 - _d_top + (_h/2))\n self.block_top = Block(\n _width=_w,\n _height=_h,\n _center=_center,\n _anchor_d=_d_top,\n _color=self.top_color,\n _type='top'\n )\n\n # Create the bars_bot\n self.bars_bot = Arm(\n self.block_bot.get_anchor(type=\"t\"),\n self.block_mid.get_anchor(type=\"b\"),\n _r1,\n self.block_bot.get_anchor_distance()\n )\n\n # Create the bars_top\n self.bars_top = Arm(\n self.block_mid.get_anchor(type='t'),\n self.block_top.get_anchor(type='b'),\n _r2,\n self.block_mid.get_anchor_distance()\n )\n\n # Create the spring_bot\n self.spring_bot = Spring(\n _P=Coordinate(x=0, 
y=self.block_bot.get_anchor(type='t').y),\n _Q=Coordinate(x=0, y=self.block_mid.get_anchor(type='b').y)\n )\n\n # Create the spring_top\n self.spring_top = Spring(\n _P=Coordinate(x=0, y=self.block_mid.get_anchor(type='t').y),\n _Q=Coordinate(x=0, y=self.block_top.get_anchor(type='b').y)\n )\n\n # Compute Theta_s - limits of the angle for the bar.\n self.theta_s_bot = np.arccos(2 * self.block_bot.anchor_d / self.bars_bot.length)\n self.theta_s_top = np.arccos(2 * self.block_mid.anchor_d / self.bars_top.length)\n\n self.theta_i_bot = 0\n self.theta_i_top = 0\n\n self.leg_length = _leg_length\n\n self.A = []\n self.B = []\n self.C = []\n\n self.ground_distance = 0.0\n\n self.init_position()", "def new_block(self, proof, previous_hash=None):\n\n\t\tblock = {\n\t\t\t'index': len(self.chain) + 1,\n\t\t\t'timestamp': time(),\n\t\t\t'transactions': self.current_transactions,\n\t\t\t'proof': proof,\n\t\t\t'previous_hash': previous_hash or self.hash(self.chain[-1]),\t\t\n\t\t}\n\n\t\t#Reset current list of transactions\n\t\tself.current_transactions = []\n\n\t\tself.chain.append(block)\n\t\treturn block", "def makeBlock(tag):\n return {\"t\":\"RawBlock\",\"c\":[\"html\",tag]}", "def new_block(self, proof, previous_hash=None):\n \n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\n }\n\n # Reset the current list of transactions\n self.current_transactions = []\n # Add block to existing chain\n self.chain.append(block)\n return block", "def create_block():\n global BLOCK\n posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE)\n posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE)\n BLOCK = c.create_oval(posx, posy,\n posx+SEG_SIZE, posy+SEG_SIZE,\n fill=\"red\")\n # print(posx, posy)\n return posx, posy", "def _construct_block(self, block_info):\n layer_name = block_info[0]\n if layer_name=='Conv2d':\n in_channels, out_channels, kernel_size = block_info[1:]\n return nn.Conv2d(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size)\n elif layer_name=='ReLU':\n return nn.ReLU(inplace=True)\n elif layer_name=='MaxPool2d':\n kernel_size, stride = block_info[1:]\n return nn.MaxPool2d(kernel_size=kernel_size,\n stride=stride)\n elif layer_name=='BatchNorm2d':\n num_features = block_info[1]\n return nn.BatchNorm2d(num_features=num_features)\n elif layer_name=='Linear':\n in_features, out_features = block_info[1:]\n return nn.Linear(in_features=in_features,\n out_features=out_features)\n else:\n raise Exception(\"_construct_block cannot construct block\")", "def new_block(self, proof, previous_hash=None):\n servers = [\n \"1.us.pool.ntp.org\",\n \"2.us.pool.ntp.org\",\n \"3.us.pool.ntp.org\"\n ]\n\n response = {}\n\n try:\n response = self.c.request('0.us.pool.ntp.org')\n except Exception:\n for server in servers:\n try:\n response = self.c.request(server)\n\n if response:\n break\n\n except Exception:\n print('\\n //// alternate ntp server didnt work')\n\n block = {\n 'message': 'New Block Forged',\n 'index': len(self.chain) + 1,\n 'timestamp': response.tx_time or time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.chain[-1]['hash'],\n }\n\n # Calculate the hash of this new Block\n block['hash'] = self.hash(block)\n\n # Reset the current list of transactions\n self.current_transactions = []\n\n self.chain.append(block)\n return block", "def __init__(self):\n 
self.unconfirmed_transactions = [] \n self.chain = []\n self.create_genesis_block()" ]
[ "0.68695533", "0.67798185", "0.6753776", "0.6749143", "0.6747121", "0.6487674", "0.6477238", "0.64685786", "0.6462074", "0.6456521", "0.6425149", "0.6414496", "0.64110404", "0.64087576", "0.6393785", "0.6349181", "0.6316754", "0.63043255", "0.63032055", "0.6290305", "0.6282599", "0.6243371", "0.62327534", "0.6225902", "0.6185653", "0.61820275", "0.6163458", "0.6157916", "0.6127525", "0.6123242" ]
0.6858441
1
This function handles the /practices/create endpoint for the blueprint
def practices_create(): practice = Practice() form = PracticeCreateForm() if form.validate_on_submit(): form.populate_obj(practice) db.session.add(practice) db.session.commit() return redirect(url_for('practices.home')) return render_template('practices/create.html', form=form)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create():\n data = request.get_json()\n print(\"DATA: \", data)\n db_helper.insert_new_record(data['first_name'], data['last_name'], data['class_period'], data['current_belt'], data['student_teacher_id'])\n result = {'success': True, 'response': 'Done'}\n return jsonify(result)", "def create_doctor():\n first_name = request.json['first_name']\n second_name = request.json['second_name']\n last_name = request.json['last_name']\n email = request.json['email']\n specialization = request.json['specialization']\n calendar_id = request.json['calendar_id']\n\n new_doctor = Doctor(first_name, second_name, last_name, email, calendar_id, specialization)\n db.session.add(new_doctor)\n db.session.commit()\n return doctor_schema.jsonify(new_doctor)", "def create():\n pass", "def new():\n\n add_review = True\n\n form = CreateReview()\n if form.validate_on_submit():\n\n try:\n review = {\n \"score\": float(form.score.data),\n \"description\": form.description.data,\n \"games_id\": form.game_id.data,\n \"users_id\": form.users_id.data\n }\n\n print(review)\n new_review = Reviews()\n new_review.create(**review)\n \n # add employee to the database\n flash('You have successfully created a Review.')\n except:\n # in case department name already exists\n flash('Error: review already exists.')\n \n\n # redirect to the login page\n return redirect(url_for('review.index'))\n\n return render_template('review/new.html', action=\"Add\", add_review=add_review, form=form, title=\"Add Review\")", "def create(self, validated_data):", "def create_meal():", "def create(self, *args, **kwargs):\n pass", "def create():", "def create():", "def create_person(self):", "def test_create(self):\n pass", "def create(self):\n ...", "def create(ctx):\n pass", "def create_question():\n if request.content_type != \"application/json\":\n abort(415)\n question_text = request.json['question']\n answer = request.json['answer']\n difficulty = request.json['difficulty']\n category = request.json['category']\n\n question_object = Question(question_text, answer, category, difficulty)\n db.session.add(question_object)\n db.session.commit()\n return jsonify({\n \"success\": True\n }), 201", "def test_client_risk_assessment_create(self):\n pass", "def test_commentary_view_create(self):\n \n test_response = self.client.get('/papers/commentary/new')\n self.assertEqual(test_response.status_code, 200)\n self.assertTemplateUsed(test_response, 'base.html')\n self.assertTemplateUsed(test_response, 'commentary-form.html') \n self.assertTemplateUsed(test_response, 'analytics_tracking.html')", "def test_create(self):\n self.assertEqual(Exercise.objects.count(), 2)\n payload = {\n 'name': 'Pecho plano',\n 'description': 'Some description',\n 'muscle_group': 'pecho'\n }\n self.client.post('/exercises/', data=payload)\n self.assertEqual(Exercise.objects.count(), 3)", "def create(self):\n\n pass", "def create(self):", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def create_book():\n Book.objects.create(book_id=\"test_id\",\n title=\"test_title\",\n authors=\"test_author\",\n published_date=\"2021\",\n categories=[\"test_category\"],\n average_rating=5,\n ratings_count=5,\n thumbnail=\"http://books.google.com/books/test\"\n )", "def create(self, request, *args, **kwargs):\n result = Class.objects.filter(id = request.data.get(\"school_class\",)).filter(instructor_id = request.user.id)\n if result.count() == 1:\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n 
self.perform_create(serializer)\n return Response(serializer.data, status=201)\n return Response({\"error\": \"To do this, you must be the instructor of this class.\"},status = 403)", "def partner_create(self):\n try:\n mongo_module.mongo_insert(self.partner)\n output = 'sucesfully created'\n code = 201\n except Exception as err:\n output = str(err)\n code = 409\n return output, code", "def createquiz():\n if not current_user.check_educator():\n return render_template('errors/error403.html'), 403\n classForm = NameForm(prefix='class')\n quizForm = NameForm(prefix='quiz')\n image_file = get_image_file(current_user)\n if quizForm.validate_on_submit():\n quiz = add_quiz(current_user, quizForm.title.data)\n if quiz is None:\n flash('You have already created a Quiz with this name. Please choose a different name.', 'warning')\n return redirect(url_for('main.dashboard'))\n return redirect(url_for('quiz.createqn', quizID=quiz.id))\n return render_template('dashboard.html', image_file=image_file, classForm=classForm, quizForm=quizForm)", "def test_api_can_create_a_book(self):\n book = {\n 'bookid': '23',\n 'title': 'Neues Buch',\n 'description': 'Unsinn',\n 'seite50_sentence': 'Ein neuer Satz ohne Verb.',\n 'published_date': '1980-01-01',\n }\n res = self.client.post(\n reverse('create_book'), book, format=\"json\")\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "def post_create():\n req_data = request.get_json()\n\n print('This is the request itself \\n', req_data)\n name = req_data['name']\n chapter = req_data['chapter']\n site = req_data['site']\n print('\\nThe function that is selected: {0} {1} {2}\\n'.format(name, chapter, site))\n flask_wms.write_new_data(name, chapter, site, \"False\")\n return 'Request recieved, create method'", "def create(self,request):\n serializer = self.serializer_class(data= request.data)\n\n if serializer.is_valid():\n name = serializer.validated_data.get('name')\n surname = serializer.validated_data.get('surname')\n message = f'Hello {name} {surname}'\n return Response({'message': message})\n else:\n return Response(\n serializer.errors,\n status = status.HTTP_400_BAD_REQUEST\n )", "def createTopic():\n data = request.json\n if \"agenda_id\" in data and \"section_position\" in data and \"topic_position\" in data and \"topic_json\" in data:\n if connectMongo.getAgendaById(data.get(\"agenda_id\")).found:\n responseWrapper = connectMongo.createNewTopic(data.get(\"agenda_id\"), data.get(\"section_position\"),\n data.get(\"topic_position\"),\n data.get(\"topic_json\"))\n return jsonify(response=200, agenda=responseWrapper.object.makeJson())\n else:\n return jsonify(response=404, msg=\"Agenda not found\")\n else:\n return jsonify(response=400, msg=\"you didn't sent all the necessary information\")" ]
[ "0.62720287", "0.6226054", "0.62232095", "0.6165267", "0.61319274", "0.61049116", "0.60810924", "0.6080852", "0.6080852", "0.5967741", "0.59200597", "0.5918674", "0.58601415", "0.5856752", "0.58543026", "0.5830439", "0.58303565", "0.58087665", "0.58066255", "0.5784546", "0.5784546", "0.5784546", "0.5771714", "0.5761254", "0.5759058", "0.5756343", "0.57399774", "0.5715238", "0.5715225", "0.5712032" ]
0.81431204
0
Generator for sequence. Will return num equally spaced items from the sequence.
def takespread(sequence, num): length = float(len(sequence)) for i in range(num): yield sequence[int(np.ceil(i * length / num))]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(ceil(i * length / num))]", "def takeNGenerator(seq, n):\n\tindex = 0\n\twhile index + n <= len(seq):\n\t\tyield seq[index:index + n]\n\t\tindex = index + 1", "def iter_n(sequence: Sequence[T], n: int) -> List[T]:\n\t\n\tfor i in range(len(sequence) - (n-1)):\n\t\tyield sequence[i:i+n]", "def group(seq, size):\n if not hasattr(seq, 'next'):\n seq = iter(seq)\n while True:\n yield [seq.next() for i in xrange(size)]", "def split_by_n( seq, n ):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def kmers(self, n: int, step: int = 1) -> Generator:\n return (\n Seq(self.sequence[i : i + n]) for i in range(0, len(self.sequence), step)\n )", "def take(n, seq):\n seq = iter(seq)\n result = []\n try:\n for i in range(n):\n result.append(next(seq))\n except StopIteration:\n pass\n return result", "def split_by_n(seq, n):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def count_to(count):\n numbers = [\"one\", \"two\", \"three\", \"four\", \"five\"]\n for number in numbers[:count]:\n yield number", "def take(n, seq):\n return itertools.islice(seq, n)", "def range(self, n):\n for i in range(n):\n yield self.get()", "def just(n, seq):\n it = iter(seq)\n for _ in range(n - 1):\n yield next(it, None)\n yield tuple(it)", "def window(seq, n):\n seq_it = iter(seq)\n result = tuple(it.islice(seq_it, n))\n if len(result) == n:\n yield result \n for elem in seq_it:\n result = result[1:] + (elem,)\n yield result", "def groups_of(seq, n):\n for i in range(0, len(seq), n):\n yield seq[i : (i + n)]", "def chunks(seq: Sequence[T], n: int) -> Iterator[Sequence[T]]:\n for i in range(0, len(seq), n):\n yield seq[i:i + n]", "def split_seq(seq,size):\n for i in range(0,len(seq),size):\n if i+size<len(seq) and seq[i+size] - seq[i] == size:\n yield seq[i:i+size]", "def get_sequence(self, length, x, y):\r\n try:\r\n for i in super(DirectionGenerator, self).get_sequence(length, x, y):\r\n yield i + 1\r\n except Exception as ex:\r\n raise ex", "def generator(self, random, args):\r\n if self.duplicates:\r\n max_count = [self.capacity // item[0] for item in self.items]\r\n return [random.randint(0, m) for m in max_count]\r\n else:\r\n return [random.choice([0, 1]) for _ in range(len(self.items))]", "def e_seq():\n yield 2;\n for n in count(2, 2):\n yield 1\n yield n\n yield 1", "def __iter__(self):\n for i in range(self.n):\n yield self.get(i, i + 1)", "def iwindow(seq, n):\n it = iter(seq)\n result = tuple(islice(it, n))\n\n if len(result) == n:\n yield result\n\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def numbers():\n for number in range(1, 76):\n yield number", "def slice_generator(\n sequence_length,\n n_blocks):\n return ((int(round((b - 1) * sequence_length/n_blocks)),\n int(round(b * sequence_length/n_blocks)))\n for b in range(1, n_blocks+1))", "def chunk(size, seq):\n if not isinstance(size, int) or size <= 0: # pragma: no cover\n raise ValueError(\"size must be an integer greater than zero\")\n\n group = []\n\n for item in seq:\n if len(group) >= size:\n yield group\n group = []\n group.append(item)\n\n if group:\n yield group", "def iterslices(iterable, n, pad_last=False, pad_value=None):\n current = []\n for a in iterable:\n current.append(a)\n if len(current) == n:\n yield current\n current = []\n if current:\n if pad_last:\n current += [pad_value] * (n-len(current))\n yield current", "def __next__(self):\n 
self.n += 2\n if self.n > self.container.maximum:\n raise StopIteration\n return self.n", "def __next__(self):\n self.n += 2\n if self.n > self.container.maximum:\n raise StopIteration\n return self.n", "def window(seq, n=2):\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def window(seq, n=2):\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result\n for elem in it:\n result = result[1:] + (elem,)\n yield result" ]
[ "0.69585615", "0.6918271", "0.6467057", "0.6417184", "0.6414611", "0.638621", "0.6358035", "0.6358023", "0.6356029", "0.62794334", "0.62706125", "0.62275124", "0.62054384", "0.6202193", "0.6199912", "0.6168549", "0.61490697", "0.61056083", "0.61028194", "0.6096058", "0.60837585", "0.6080288", "0.6068386", "0.6067369", "0.60491616", "0.6045798", "0.60156685", "0.60156685", "0.59890145", "0.59890145" ]
0.69746965
0
Adds an object (or an object of the given class) with the given name to the runtime object.
def add_object(self, name, cls_or_object): if self._internal.is_node_registered_within_endpoint(): raise CloudioModificationException('A CloudioRuntimeObject\'s structure can only be modified before' + ' it is registered within the endpoint!') # Check if parameter is a class if not isinstance(cls_or_object, CloudioObject): # Create an object of that class cls = cls_or_object obj = cls() # Create an object of that class self.add_object(name, obj) return obj else: # We have an CloudioObject to add to the node obj = cls_or_object obj._internal.set_parent_object_container(self) obj._internal.set_name(name) # Add object to the objects container assert name not in self._internal.objects, 'Object with given name already present!' self._internal.objects[name] = obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addObject(self, name, object):\n self.map[name] = object", "def add_object(self, name, obj):\n if not isinstance(obj, SceneObject):\n raise ValueError('obj must be an object of type SceneObject')\n self._objects[name] = obj\n self.close_renderer()", "def _add_object(self, name, model, *args, **kwargs):\n logger.debug('Adding object with name \"{}\" to model.'.format(name))\n obj = model(weakref.proxy(self), name, *args, **kwargs) # Add hidden hard reference\n self._objects.append(obj)\n return self.get_object(obj.name)", "def add(self, name, obj):\n obj = super(Assembly, self).add(name, obj)\n if is_instance(obj, Component):\n self._depgraph.add(obj.name)\n return obj", "def register(self, obj, name=None):\n if not name:\n name = obj.__name__\n if name in self._registry:\n raise KeyError(\"Name '%s' has been registered in '%s'!\" %\n (name, self._name))\n\n # logging.vlog(1, \"Registering %s (%s) in %s.\", name, obj, self._name)\n self._registry[name] = obj", "def _add(object, name, value):\n self.__added__.append(name)\n setattr(object, name, value)", "def add_object(self, obj):\n\t\tself.objects.append(obj)", "def add_object(sv, name):\r\n if name in sv.Object: \r\n return sv.Object[name] # do not create \r\n else:\r\n nod=nd.Node() # create object\r\n sv.Object[name]=nod # add name to object dict (not ordered) \r\n sv.Object_list.append(name) # add name to object list (ordered) \r\n nod.name=name # object name\r\n return nod", "def Add(self, obj_type, name, node=None, obj=None):\n print \"Adding object %s, node: %s\" % (name, node)\n #check for duplicate object\n # also raise error if no such object type\n if self.ObjectExists(obj_type, name):\n raise DuplicateObjectError(name)\n \n #find out where we need to put it and stick it in there\n idx = bisect.bisect(self.objects[obj_type], name)\n if not node:\n node = game_objects.ObjectUtilities.ObjectNode(self, name, self.object_modules[obj_type])\n if obj:\n node.CopyObject(obj)\n self.objects[obj_type].insert(idx, node)\n \n #let our listeners know we added a new object and let them\n # know the parent in terms of alphabetical order\n if idx == 0:\n #if we're inserting at the start there is no preceding element\n self.sendODBEvent(ODBAdd(node, obj_type, None))\n else:\n self.sendODBEvent(ODBAdd(node, obj_type, self.objects[obj_type][idx-1].name))\n \n node.SetModified(True)\n self.MarkModified(node)", "def register_object(self, obj):\n self.modules.append(obj)", "def add_object(self, obj):\n self._objects.append(obj)", "def add(self, obj):\n raise NotImplementedError", "def add_instance(self,name):\n new = self.create_instance(name)\n self.model.append(new)\n return new", "def add(self, game_obj):\r\n self.game_objects_for_adding.append(game_obj)", "def add_object(self, obj: str):\n if obj not in self._objects:\n self._objects.append(obj)\n else:\n raise IDAlreadyExists", "def add(obj):", "def addobj(self, obj):\n self._objslock.acquire()\n if obj.objid in self._objs:\n self._objslock.release()\n raise KeyError(\"non-unique EMANE object id %s for %s\" % (obj.objid, obj))\n self._objs[obj.objid] = obj\n self._objslock.release()", "def add_class(self, name):\n if name is not None and not self.has_class(name):\n self._cached_class.append(name)\n self._update_class()", "def update_object(self, name: str) -> None:\n try:\n object = Object.from_name(name)\n except Object.NotFound:\n record = self.catalog.get(name) # must be name pattern recognized by catalog\n log.info(f'Creating new object for {name}')\n Object.add({'type_id': 
self.__get_type_id(record), 'aliases': self.__get_names(record),\n 'ra': record.ra, 'dec': record.declination, 'redshift': record.redshift,\n 'data': {'tns': record.to_json()}})\n else:\n # find best alternate identifier for catalog search\n for provider in ('iau', 'ztf', 'atlas'): # preferred ordering\n if provider in object.aliases:\n if name != object.aliases[provider]:\n log.debug(f'Searching with name {object.aliases[provider]} <- {name}')\n name = object.aliases[provider]\n break\n else:\n raise TNSError(f'Object ({name}) not found in catalog')\n record = self.catalog.get(name)\n self.__ensure_iau_pattern(record.name)\n if info := self.__build_info(object, record):\n Object.update(object.id, **info)\n else:\n log.info(f'No changes found for {name}')", "def add(self, *args, **kwargs):\n obj = self._class(*args, **kwargs)\n self._items.append(obj)", "def putobjname(self,objname_): # 3\n res = self.__obj.putobjname(objname_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def add_object(self, object_type, data=None, read_from_netbox=False, source=None):\n\n # create new object\n new_object = object_type(data, read_from_netbox=read_from_netbox, inventory=self, source=source)\n\n # add to inventory\n self.base_structure[object_type.name].append(new_object)\n\n if read_from_netbox is False:\n log.info(f\"Created new {new_object.name} object: {new_object.get_display_name()}\")\n\n return new_object", "def add(self, name, value) -> None:\n ...", "def add(self, obj):\n self.session.add(obj)", "def register(obj_name, obj):\n if obj_name not in ninja_globals['register']:\n ninja_globals['register'][obj_name] = obj", "def AddFrameObj(self, name, frame):\n if not self.frame_objects.has_key(name):\n self.frame_objects[name] = frame\n else:\n print(\"A frame named \\\"{}\\\" already exists!\".format(name))", "def associateObject (self, obj):\n self.__associatedObjects.add(obj)", "def add_object(self, obj_data, obj_name, obj_orientation, qpmi, entity):\n self.objects.append((obj_data, obj_name, obj_orientation, qpmi, entity))\n if len(self.objects) == 1:\n self.set_default_brush()", "def addType(self, name):\n setattr(self, name, name)\n self._type_names[name] = name\n if name in self._pending_type_names:\n del self._pending_type_names[name]", "def addidfobject(self, new_object):\n key = new_object.key.upper()\n self.idfobjects[key].append(new_object)\n self._reset_dependant_vars(\"idfobjects\")" ]
[ "0.76477057", "0.7273519", "0.7175446", "0.7083584", "0.7020766", "0.70008993", "0.69030166", "0.68717223", "0.6804043", "0.67406464", "0.6653478", "0.6592018", "0.6576465", "0.6524411", "0.65112865", "0.63575375", "0.63374376", "0.6245882", "0.62386525", "0.6237244", "0.6229914", "0.62289894", "0.6160031", "0.614866", "0.6148136", "0.6142161", "0.6126475", "0.6113588", "0.6098209", "0.6083627" ]
0.7540122
1
Filter input api request
def _filter_in_request(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_filters():\n params = {'api_key': API_KEY}\n for k in demisto.args():\n if demisto.getArg(k):\n params[k] = demisto.getArg(k)\n return params", "async def filter(self, **kwargs):\n\n pass", "def filterRansac():\n pass", "def filter_query(self, query, request, resource):\n raise NotImplementedError()", "def filter(self, *args, **kwargs):", "def filter(self, filters):", "def get_timeline_filters(self, req):", "def filter(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"filter\")", "def scrub_request(self, data):\n return self.__request_scrubber(data)", "def filter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"filter\")", "def parse_request(self, request):\n request.process_inputs()", "def search_api(request):\n data = ApiViewFilters(request.GET, queryset=ApiView.objects.all())\n return render(request, 'template.html', {'filter': data})", "def filter(self, filter_dict):\n pass", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')", "def filter_req(req, extra):\n if extra and not req.marker:\n return False\n keep_req = True\n if req.marker:\n if not extra:\n extra = None\n keep_req = req.marker.evaluate({\"extra\": extra})\n return keep_req", "def filter(self) -> Optional[pulumi.Input['FilterArgs']]:\n return pulumi.get(self, \"filter\")", "def get_filters(self):", "def apply_search_filters():\n params = {'api_key': API_KEY}\n for k in demisto.args():\n if demisto.getArg(k):\n params['term'] = k\n params['query'] = demisto.getArg(k)\n break\n return params", "def get_filters():\r\n print('Hello! Let\\'s explore some US bikeshare data!\\n')\r\n\r\n # ref https://stackabuse.com/getting-user-input-in-python/\r\n\r\n # Get user input for city (chicago, new york city, washington).\r\n cities = ['Chicago', 'New York city', 'Washington']\r\n city = get_user_input(cities,\"city\")\r\n\r\n # Get user input for month (all, january, february, ... , june)\r\n months = ['All', 'Jan', 'Feb', 'Mar', 'Apr', 'Jun']\r\n month = get_user_input(months,\"month\")\r\n\r\n # Get user input for day of week (all, monday, tuesday, ... sunday)\r\n days = ['All', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\r\n day = get_user_input(days,\"day\")\r\n\r\n print('-'*40)\r\n return city, month, day", "def filter_flag_present(self, req, qs):\n return qs", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n #Invalid input is administered to by using a while loop.\n while True:\n city=input(\"Choose a city name between Chicago, New York City or Washington:!\").lower()\n if city not in CITY_DATA:\n print(\"\\n Not a valid city\\n\")\n continue\n else:\n break\n\n # TO DO: get user input for month (all, january, february, ... 
, june)\n while True:\n try\n month=str(input('Enter name of one month(from January to June) to filter by or \"all\" ,for no filter :')).lower()\n months=['january', 'february', 'march', 'april', 'may', 'june']\n if month == 'january':\n month = months[0]\n elif month == 'february':\n month = months[1]\n elif month == 'march':\n month = months[2]\n elif month == 'april':\n month = months[3]\n elif month == 'may':\n month = months[4]\n elif month == 'june':\n month = months[5]\n elif month == 'all':\n print('all')\n else:\n raise(Exception)\n\t\t\texcept Exception as error:\n print('Invalid Input!,please restart again!.')", "def filter_output(self, request, output):\n return output", "def filter(self, filters):\r\n # because http.Request needs params to be a dict of strings to strings\r\n # (roughly) and since BitBucket wants repeated parameters to express\r\n # OR, we'll do the quoting by hand ourselves\r\n def flatten_conditions(filters):\r\n for key, val in filters.items():\r\n if isinstance(val, (list, tuple)):\r\n for v in val:\r\n yield (port.to_b(key), port.to_b(v))\r\n else:\r\n yield (port.to_b(key), port.to_b(val))\r\n\r\n to_encode = tuple(flatten_conditions(filters))\r\n qs = port.urlencode(to_encode)\r\n\r\n url = '{0}/?{1}'.format(self.get_url(), qs)\r\n return http.Request('GET', url), parsers.parse_json", "def filter(self):\n\t\tparameters = {}\n\n\t\tif self.keywords:\n\t\t\tparameters['track'] = ','.join(self.keywords)\n\n\t\tif self.locations:\n\t\t\tparameters['locations'] = ','.join([','.join([str(latlong) for latlong in loc]) for loc in self.locations])\n\n\t\tif self.usernames:\n\t\t\tparameters['follow'] = ','.join([str(u) for u in self.usernames])\n\n\t\tself.launch('statuses/filter.json', parameters)", "def get_filters():\n\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # ADD : available analysis parameters\n cities_list=['chicago','new york city','washington']\n months_list=['all','january','february','march','april','may','june']\n days_list=['all','monday','tuesday','wednesday','thursday','friday','saturday','sunday']\n\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city=''\n while city not in cities_list:\n city=str(input(\"Enter the name of the city to analyze: \")).lower()\n if city not in cities_list:\n print(\"!Warning : cities available for analysis : {}\".format(cities_list))\n\n # TO DO: get user input for month (all, january, february, ... , june)\n month=''\n while month not in months_list:\n month=str(input(\"Enter the month to analyze (enter 'all' if you want all the months): \")).lower()\n if month not in months_list:\n print(\"!Warning : months available for analysis : {}\".format(months_list))\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n day=''\n while day not in days_list:\n day=str(input(\"Enter the day to analyze (enter 'all' if you want all the days): \")).lower()\n if day not in days_list:\n print(\"!Warning : days available for analysis : {}\".format(days_list))\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city =input(\"Which city do you want? 
please choose a city by typing (c) for chicago or (n) for new york or (w) for washington: \\n\\n \").lower()\n # Validate the city input\n while city not in (CITY_DATA.keys()):\n print(\"\\n Error: That's invalid city name.\\n\")\n city =input(\"Which city do you want? please choose a city by typing (c) for chicago or (n) for new york or (w) for washington: \\n\\n \").lower()\n # TO DO: get user input for month (all, january, february, ... , june)\n months = ['january','february','march','april','may','june','all']\n month = input(\"\\n\\nTo filter {}\\'s data by a particular month ,so please type the month or all for not filtering by month:\\n-january\\n-february\\n-march\\n-april\\n-may\\n-june\\n-all\\n\\n:\".format(city.title())).lower() \n # Validate the user input\n while month not in months:\n print(\"\\nError: That's invalid input please write a valid month name or all.\\n\") \n month =input(\"\\n\\n To filter {}\\'s data by a particular month ,so please type the month or all for not filtering by month:\\n-january\\n-february\\n-march\\n-april\\n-may\\n-june\\n-all\\n\\n:\".format(city.title())).lower() \n \n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n Days=['monday','tuesday','wednesday','thursday','friday','saturday','sunday','all']\n day =input(\"\\n\\n To filter {}\\'s data by a particular day ,so please type the day or all for not filtering by day:\\n-monday\\n-tuesday\\n-thursday\\n-friday\\n-saturday\\n-sunday\\n-all\\n\\n:\".format(city.title())).lower() \n #Validate the user input\n while day not in Days:\n print(\"\\nError: That's invalid input please write a valid day name or all.\\n\") \n day =input(\"\\n\\n To filter {}\\'s data by a particular day ,so please type the day or all for not filtering by day:\\n-monday\\n-tuesday\\n-thursday\\n-friday\\n-saterday\\n-sunday\\n-all\\n\\n:\".format(city.title())).lower() \n \n\n print('-'*40)\n return city, month, day", "def _filter_return_request_querystring(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n if url in entry[\"request\"][\"url\"]:\r\n temp = entry[\"request\"][\"queryString\"]\r\n tempObject = {}\r\n for item in temp:\r\n item = json.dumps(item, ensure_ascii=True)\r\n json1 = json.loads(item)\r\n key = json1['name']\r\n val = json1['value']\r\n tempObject[key]=val\r\n tempObject['url'] = entry[\"request\"][\"url\"]\r\n tempObject['startedDateTime'] = entry[\"startedDateTime\"]\r\n matches.append(tempObject)\r\n return matches", "async def __acall__(self, request):\n try:\n if self.filter and self.filter(request):\n request.ipinfo = None\n else:\n request.ipinfo = await self.ipinfo.getDetails(\n self.ip_selector.get_ip(request)\n )\n except Exception:\n request.ipinfo = None\n LOGGER.error(traceback.format_exc())\n\n response = await self.get_response(request)\n return response", "def test_optional_filter_params(self):\n del self.internal_filter['max']\n del self.external_filter['max']\n\n # Serialize\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)\n\n # Deserialize\n serializer = serializers.FilterSerializer(data=self.external_filter)\n self.assertTrue(serializer.is_valid())\n self.assertDictEqual(serializer.validated_data, self.internal_filter)", "def test_parse_filter_params_no_time(self):\n filter_params = {\n \"region\": FAKE.word(),\n \"payer_tenant_id\": FAKE.uuid4(),\n \"instance_type\": FAKE.word(),\n }\n serializer = 
OCIFilterSerializer(data=filter_params)\n self.assertTrue(serializer.is_valid())" ]
[ "0.6453626", "0.63813454", "0.6328844", "0.6165196", "0.61065346", "0.609105", "0.6009696", "0.5985568", "0.59085464", "0.5872922", "0.5858413", "0.5852256", "0.5820519", "0.5807515", "0.5779072", "0.5766825", "0.5754975", "0.5714173", "0.57101095", "0.56960094", "0.5687599", "0.56712925", "0.56604844", "0.5634171", "0.5617688", "0.56164587", "0.5603011", "0.5583781", "0.55795467", "0.55643225" ]
0.6802354
0
Gets a QuerySet of current objects related to ``objs`` via the relation ``related``.
def related_objects(self, related, objs): from versions.models import Versionable related_model = related.related_model if issubclass(related_model, Versionable): qs = related_model.objects.current else: qs = related_model._base_manager.all() return qs.using(self.using).filter( **{"%s__in" % related.field.name: objs} )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def related_objects(self, related_model, related_fields, objs):\n predicate = reduce(operator.or_, (\n query_utils.Q(**{'%s__in' % related_field.name: objs})\n for related_field in related_fields\n ))\n return related_model._default_manager.using(self.using).filter(\n predicate\n )", "def _get_related_objects(obj, parent_class=False):\n foreign_managers = _get_related_managers(obj, parent_class)\n\n related_objects = []\n for manager in foreign_managers:\n related_objects += manager.all()\n\n return related_objects", "def get_related_objects(self):\n result = []\n if self['name'] != None:\n tmp = ObjectDefinition.objects.filter(use__has_field=self['name'], object_type=self['object_type'])\n for i in tmp: result.append(i)\n return result", "def get_related(this_obj, other_obj, m2m=False):\n # is het niet raar dat je voor twee concrete objecten ophaalt naar welke van het ene type\n # verwezen wordt vanuit het andere type? Of is dat om de vorige/volgende te kunnen bepalen?\n # als ik kijk naar het gebruik in GetRelations dan is het tweede argument ook niet een object\n # maar een relatie (uit de fields verzameling)\n if m2m:\n fields = [x for x in other_obj._meta.many_to_many]\n else:\n fields = [x for x in other_obj._meta.get_fields() if x.name != 'project' and\n x.get_internal_type() == 'ForeignKey']\n for fld in fields:\n if fld.related_model == this_obj._meta.model:\n related_name = fld.related_query_name()\n break\n else:\n return None # not found\n try:\n return this_obj.__getattribute__(related_name).all()\n except UnboundLocalError:\n return None\n # zou je deze ook kunnen vervangen door een aanroep van get_relation en dan met de opgehaalde\n # naam de gerelateerde objecten ophalen en meteen de vorige en de volgende bepalen?\n # (heeft uiteraard konsekwenties voor de aanroepende code)\n # oorspronkelijk lijkt dat ook zo geweest te zijn, de functie heette toen get_relation en het\n # gedeelte dat nu nog zo heet was daarin hardgecodeerd\n # deze functie wordt alleen aangeroepen in een paar methoden van de hieronder opgenomen klasse\n # GetRelations, namelijk om de namen van relaties uit andere objecten naar het huidige te kunnen\n # bepalen.\n # Als je get_relation zoals die nu is gebruikt zou je dat onderscheid (van versus naar relaties)\n # met dezelfde functie kunnen afhandelen", "def select_related(self, *related, outer_join=None):\n outer_join = self.outer_join if outer_join is None else outer_join\n return self.clone(\n related_clauses=self.related_clauses + list(related),\n outer_join=outer_join)", "def filter_related_filtersets(self, queryset):\n for related_name, related_filterset in self.related_filtersets.items():\n # Related filtersets should only be applied if they had data.\n prefix = '%s%s' % (related(self, related_name), LOOKUP_SEP)\n if not any(value.startswith(prefix) for value in self.data):\n continue\n\n field_name = self.filters[related_name].field_name\n lookup_expr = LOOKUP_SEP.join([field_name, 'in'])\n subquery = Subquery(related_filterset.qs.values('pk'))\n queryset = queryset.filter(**{lookup_expr: subquery})\n\n return queryset", "def get_iterable_objects(cls, current_objects):\n if current_objects is None:\n return []\n\n if hasattr(current_objects, \"all\"):\n # i.e, Django ManyToMany relationships\n if ismethod(current_objects.all):\n return current_objects.all()\n return []\n\n elif not hasattr(current_objects, \"__iter__\"):\n current_objects = [current_objects]\n\n return current_objects", "def get_related(self, obj, queryset_or_model, num=None):\n 
queryset, model = get_queryset_and_model(queryset_or_model)\n model_table = qn(model._meta.db_table)\n content_type = ContentType.objects.get_for_model(obj)\n related_content_type = ContentType.objects.get_for_model(model)\n query = \"\"\"\n SELECT %(model_pk)s, COUNT(related_tagged_item.object_id) AS %(count)s\n FROM %(model)s, %(tagged_item)s, %(tag)s, %(tagged_item)s related_tagged_item\n WHERE %(tagged_item)s.object_id = %%s\n AND %(tagged_item)s.content_type_id = %(content_type_id)s\n AND %(tag)s.id = %(tagged_item)s.tag_id\n AND related_tagged_item.content_type_id = %(related_content_type_id)s\n AND related_tagged_item.tag_id = %(tagged_item)s.tag_id\n AND %(model_pk)s = related_tagged_item.object_id\"\"\"\n if content_type.pk == related_content_type.pk:\n # Exclude the given instance itself if determining related\n # instances for the same model.\n query += \"\"\"\n AND related_tagged_item.object_id != %(tagged_item)s.object_id\"\"\"\n query += \"\"\"\n GROUP BY %(model_pk)s\n ORDER BY %(count)s DESC\n %(limit_offset)s\"\"\"\n query = query % {\n 'model_pk': '%s.%s' % (model_table, qn(model._meta.pk.column)),\n 'count': qn('count'),\n 'model': model_table,\n 'tagged_item': qn(self.model._meta.db_table),\n 'tag': qn(self.model._meta.get_field('tag').rel.to._meta.db_table),\n 'content_type_id': content_type.pk,\n 'related_content_type_id': related_content_type.pk,\n 'limit_offset': num is not None and connection.ops.limit_offset_sql(num) or '',\n }\n\n cursor = connection.cursor()\n cursor.execute(query, [obj.pk])\n object_ids = [row[0] for row in cursor.fetchall()]\n if len(object_ids) > 0:\n # Use in_bulk here instead of an id__in lookup, because id__in would\n # clobber the ordering.\n object_dict = queryset.in_bulk(object_ids)\n return [object_dict[object_id] for object_id in object_ids \\\n if object_id in object_dict]\n else:\n return []", "def associated_objects(self):\n return self._associated_objects", "def get_related_objects(self, obj_type):\n suffix = self._get_api_suffix(obj_type)\n if obj_type == self.__class__ and suffix == 'adversaries':\n return []\n endpoint = self._get_api_endpoint() + '/' + suffix\n results = self.tq.get(endpoint)\n if 'data' not in results:\n return []\n\n tr = []\n for obj in results['data']:\n inst = obj_type(self.tq)\n inst.fill_from_api_response(obj)\n tr.append(inst)\n return tr", "def get_related(self, obj, queryset_or_model, num=None):\r\n queryset, model = get_queryset_and_model(queryset_or_model)\r\n model_table = qn(model._meta.db_table)\r\n content_type = ContentType.objects.get_for_model(obj)\r\n related_content_type = ContentType.objects.get_for_model(model)\r\n query = \"\"\"\r\n SELECT %(model_pk)s, COUNT(related_tagged_item.object_id) AS %(count)s\r\n FROM %(model)s, %(tagged_item)s, %(tag)s, %(tagged_item)s related_tagged_item\r\n WHERE %(tagged_item)s.object_id = %%s\r\n AND %(tagged_item)s.content_type_id = %(content_type_id)s\r\n AND %(tag)s.id = %(tagged_item)s.tag_id\r\n AND related_tagged_item.content_type_id = %(related_content_type_id)s\r\n AND related_tagged_item.tag_id = %(tagged_item)s.tag_id\r\n AND %(model_pk)s = related_tagged_item.object_id\"\"\"\r\n if content_type.pk == related_content_type.pk:\r\n # Exclude the given instance itself if determining related\r\n # instances for the same model.\r\n query += \"\"\"\r\n AND related_tagged_item.object_id != %(tagged_item)s.object_id\"\"\"\r\n query += \"\"\"\r\n GROUP BY %(model_pk)s\r\n ORDER BY %(count)s DESC\r\n %(limit_offset)s\"\"\"\r\n query = query % {\r\n 
'model_pk': '%s.%s' % (model_table, qn(model._meta.pk.column)),\r\n 'count': qn('count'),\r\n 'model': model_table,\r\n 'tagged_item': qn(self.model._meta.db_table),\r\n 'tag': qn(self.model._meta.get_field('tag').rel.to._meta.db_table),\r\n 'content_type_id': content_type.pk,\r\n 'related_content_type_id': related_content_type.pk,\r\n 'limit_offset': num is not None and connection.ops.limit_offset_sql(num) or '',\r\n }\r\n\r\n cursor = connection.cursor()\r\n cursor.execute(query, [obj.pk])\r\n object_ids = [row[0] for row in cursor.fetchall()]\r\n if len(object_ids) > 0:\r\n # Use in_bulk here instead of an id__in lookup, because id__in would\r\n # clobber the ordering.\r\n object_dict = queryset.in_bulk(object_ids)\r\n return [object_dict[object_id] for object_id in object_ids \\\r\n if object_id in object_dict]\r\n else:\r\n return []", "def filter(self, **kwargs):\n related_names = []\n for argname, _ in kwargs.iteritems():\n related_name = argname.split('__')\n if len(related_name) > 1:\n related_names.append(\"__\".join(related_name[:-1]))\n if len(related_names) > 0:\n return super(\n JeevesQuerySet, self).filter(\n **kwargs).select_related(*related_names)\n else:\n return super(JeevesQuerySet, self).filter(**kwargs)", "def relevant():\n query = (self.query[exp[\"ids\"][0]]\n if exp[\"object_name\"] == \"__previous__\" else exp)\n return object_class.id.in_(\n RelationshipHelper.get_ids_related_to(\n object_class.__name__,\n query[\"object_name\"],\n query[\"ids\"],\n )\n )", "def get_related_trackers(self):\n\n return Tracker.objects.filter(product=self.pk)", "def get_related_models(self):\n\t\tmodels = []\n\t\tif not self.related_models:\n\t\t\treturn models\n\n\t\tfor model in self.related_overrides.get(self.related_override_key(), self.related_models):\n\t\t\ttry:\n\t\t\t\tgroup, model_path, extra_fields = model\n\t\t\texcept ValueError:\n\t\t\t\tgroup, model_path = model\n\t\t\t\textra_fields = ()\n\t\t\tapp_label, model_name = model_path.split('.')\n\t\t\tmodels.append((group, apps.get_model(app_label, model_name,), extra_fields, group.replace('_', ' ')))\n\n\t\treturn models", "def select_related(self, *fields):\n self._not_support_combined_queries(\"select_related\")\n if self._fields is not None:\n raise TypeError(\n \"Cannot call select_related() after .values() or .values_list()\"\n )\n\n obj = self._chain()\n if fields == (None,):\n obj.query.select_related = False\n elif fields:\n obj.query.add_select_related(fields)\n else:\n obj.query.select_related = True\n return obj", "def get_related_indicators(self):\n # imported here to prevent circular deps\n from fn_threatq.threatqsdk.indicator import Indicator\n return self.get_related_objects(Indicator)", "def associatedObjects (self):\n return self.__associatedObjects", "def related_entities(self):\n related_entities = []\n\n for prop in dir(self):\n if prop.endswith('_related'):\n related = getattr(self, prop).all()\n if related:\n for entity in related:\n record_type = entity.object_ref._meta.object_name\n entity_metadata = {\n 'name': str(entity),\n 'record_type': record_type,\n 'field_name': entity._meta.model_name.replace(record_type.lower(), '').title(),\n 'value': entity.value,\n 'url': None\n }\n # Links for top-level entities\n if record_type in ['Organization', 'Person', 'Violation']:\n entity_metadata['url'] = reverse_lazy(\n 'edit-{}'.format(record_type.lower()),\n args=[entity.object_ref.uuid]\n )\n # Standardized relationship links\n elif record_type in ['Emplacement', 'Association']:\n entity_metadata['url'] 
= reverse_lazy(\n 'edit-organization-{}'.format(record_type.lower()),\n kwargs={\n 'organization_id': entity.object_ref.organization.get_value().value.uuid,\n 'pk': entity.object_ref.pk\n }\n )\n # Irregular relationship links\n elif record_type == 'Composition':\n entity_metadata['url'] = reverse_lazy(\n 'edit-organization-composition',\n kwargs={\n 'organization_id': entity.object_ref.parent.get_value().value.uuid,\n 'pk': entity.object_ref.pk\n }\n )\n elif record_type == 'MembershipPerson':\n entity_metadata['url'] = reverse_lazy(\n 'edit-organization-personnel',\n kwargs={\n 'organization_id': entity.object_ref.organization.get_value().value.uuid,\n 'pk': entity.pk\n }\n )\n elif record_type == 'MembershipOrganization':\n entity_metadata['url'] = reverse_lazy(\n 'edit-organization-membership',\n kwargs={\n 'organization_id': entity.object_ref.organization.get_value().value.uuid,\n 'pk': entity.pk\n }\n )\n related_entities.append(entity_metadata)\n return related_entities", "def prefetch_related(self, *lookups):\n self._not_support_combined_queries(\"prefetch_related\")\n clone = self._chain()\n if lookups == (None,):\n clone._prefetch_related_lookups = ()\n else:\n for lookup in lookups:\n if isinstance(lookup, Prefetch):\n lookup = lookup.prefetch_to\n lookup = lookup.split(LOOKUP_SEP, 1)[0]\n if lookup in self.query._filtered_relations:\n raise ValueError(\n \"prefetch_related() is not supported with FilteredRelation.\"\n )\n clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups\n return clone", "def get_related_filtersets(self):\n related_filtersets = OrderedDict()\n\n for related_name in self.related_filters:\n if related_name not in self.filters:\n continue\n\n f = self.filters[related_name]\n related_filtersets[related_name] = f.filterset(\n data=self.data,\n queryset=f.get_queryset(self.request),\n relationship=related(self, related_name),\n request=self.request,\n prefix=self.form_prefix,\n )\n\n return related_filtersets", "def optimice_query(self, foreignkey_fields, many_to_many_fields):\n query = self.model.objects.select_related(\n *foreignkey_fields) if len(foreignkey_fields) else self.model.objects\n query = query.prefetch_related(\n *many_to_many_fields) if len(many_to_many_fields) else query\n query = query.annotate(**self.include)\n return query", "def get_queryset(self):\n\n return Relationship.objects.filter(\n Q(from_person=self.request.user.person) |\n Q(to_person=self.request.user.person))", "def get_ids_related_to(cls, object_type, related_type, related_ids=[]):\n\n if isinstance(related_ids, (int, long)):\n related_ids = [related_ids]\n\n if not related_ids:\n return db.session.query(Relationship.source_id).filter(sql.false())\n\n destination_ids = db.session.query(Relationship.destination_id).filter(\n and_(\n Relationship.destination_type == object_type,\n Relationship.source_type == related_type,\n Relationship.source_id.in_(related_ids),\n )\n )\n source_ids = db.session.query(Relationship.source_id).filter(\n and_(\n Relationship.source_type == object_type,\n Relationship.destination_type == related_type,\n Relationship.destination_id.in_(related_ids),\n )\n )\n\n queries = [destination_ids, source_ids]\n queries.extend(cls.get_extension_mappings(\n object_type, related_type, related_ids))\n queries.extend(cls.get_special_mappings(\n object_type, related_type, related_ids))\n\n return cls._array_union(queries)", "def selected_relationships(self):\n return self._selected_relationships", "def _filter_related_m2m(self, rel):\n field = 
rel.field\n if isinstance(field, models.ManyToManyField):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel", "def get_tag_related_posts(tag):\n related_posts_ids = [post.id for post in tag.posts.all()[:20]]\n return Post.objects.filter(id__in=related_posts_ids). \\\n add_comments_count(). \\\n prefetch_related('author'). \\\n prefetch_with_tags_and_likes()", "def get_queryset(self):\n # Get tags from the request if it was specified\n tags = self.request.query_params.get('tags')\n # Get authors from the request if it was specified\n authors = self.request.query_params.get('authors')\n # Make copy of queryset as to not modify the original queryset\n queryset = self.queryset\n if tags:\n # Get list of ids specified\n tag_ids = self._params_to_ints(tags)\n # Filter on the foreign key object with tags__id__in\n queryset = queryset.filter(tags__id__in=tag_ids)\n if authors:\n # Get list of ids specified\n author_ids = self._params_to_ints(authors)\n # Filter by the author\n queryset = queryset.filter(authors__id__in=author_ids)\n\n return queryset.filter(user=self.request.user)", "def get(self, *args, **kwargs):\n self.before_get(args, kwargs)\n\n relationship_field, model_relationship_field, related_type_, related_id_field = self._get_relationship_data()\n related_view = self.schema._declared_fields[relationship_field].related_view\n related_view_kwargs = self.schema._declared_fields[relationship_field].related_view_kwargs\n\n obj, data = self._data_layer.get_relationship(model_relationship_field,\n related_type_,\n related_id_field,\n kwargs)\n\n for key, value in copy(related_view_kwargs).items():\n if isinstance(value, str) and value.startswith('<') and value.endswith('>'):\n tmp_obj = obj\n for field in value[1:-1].split('.'):\n tmp_obj = getattr(tmp_obj, field)\n related_view_kwargs[key] = tmp_obj\n\n result = {'links': {'self': request.path,\n 'related': url_for(related_view, **related_view_kwargs)},\n 'data': data}\n\n qs = QSManager(request.args, self.schema)\n if qs.include:\n schema = compute_schema(self.schema, dict(), qs, qs.include)\n\n serialized_obj = schema.dump(obj)\n result['included'] = serialized_obj.data.get('included', dict())\n\n self.after_get(result)\n return result", "def queryset(self, request):\n qs = super(SiteAdmin, self).queryset(request)\n qs = Site.admin.select_related().filter(id__in=qs)\n ordering = self.ordering or ()\n if ordering:\n qs = qs.order_by(*ordering)\n return qs" ]
[ "0.7643895", "0.6433926", "0.62799156", "0.61716324", "0.60885555", "0.5964254", "0.5934872", "0.58134305", "0.5811461", "0.5781093", "0.57791954", "0.57010734", "0.5631755", "0.55931336", "0.55913925", "0.55770904", "0.5536853", "0.55321735", "0.5504152", "0.5481324", "0.5445515", "0.5442537", "0.5440629", "0.5369968", "0.5368387", "0.53302693", "0.5328749", "0.5293258", "0.52756876", "0.52116346" ]
0.8107112
0
add user to db. User details (name, pass, phone) is in json_details
def add_user_to_db(json_details): return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_user():\n json = request.json\n name = json[\"name\"]\n email = json[\"email\"]\n pwd = json[\"pwd\"]\n if name and email and pwd and request.method == \"POST\":\n sql = \"INSERT INTO users(user_name, user_email, user_password) \" \\\n \"VALUES(%s, %s, %s)\"\n data = (name, email, pwd)\n try:\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(sql, data)\n conn.commit()\n cursor.close()\n conn.close()\n resp = jsonify(\"User created successfully!\")\n resp.status_code = 200\n return resp\n except Exception as exception:\n return jsonify(str(exception))\n else:\n return jsonify(\"Please provide name, email and pwd\")", "def add_user():\n username = request.json['username']\n email = request.json['email']\n\n user = User(username, email)\n\n db.session.add(user)\n db.session.commit()\n return user_schema.jsonify(user)", "def add_user(db, user_data):\n username, password, email, position, phone = user_data[:5]\n\n # Set the new user id\n #users = db['user'].find()\n #next_id = max(u['_id'] for u in users) + 1\n\n # Set Access Level. 1 will be for a user that has some content to view.\n # Default level is 0\n access_level_map = {'D': 3, 'S': 2}\n access_level = access_level_map.get(position, 0)\n\n security_questions = []\n security_answers = []\n\n security_answers_hash = [generate_password_hash(ans)\n for ans in security_answers]\n\n password_hash = generate_password_hash(password)\n\n\n # Create the data JSON\n new_user = db['user'].insert_one({\n 'username': username,\n 'access_level': access_level,\n 'email': email,\n 'position': position,\n 'phone': phone,\n 'security_questions': security_questions,\n 'login_timestamp':str(datetime.datetime.utcnow()),\n 'deleted': False\n })\n\n db['security'].insert_one({\n 'user_id': str(new_user.inserted_id),\n 'password': password_hash,\n 'security_answers': security_answers_hash\n })\n\n # Insert user into DB\n return True", "def add_user(request):\n text = request.body\n text_str = str(text, encoding=\"utf8\")\n text_dict = json.loads(text_str)\n\n uid = text_dict.get(\"id\")\n name = text_dict.get(\"name\")\n user = {\n \"id\": uid,\n \"name\": name\n }\n return JsonResponse({\"code\": 10200,\n \"message\": \"add successful\",\n \"data\": user})", "def create_user():\r\n if not request.is_json or 'name' not in request.get_json() or 'phone_number' not in request.get_json() or 'password' not in request.get_json():\r\n return bad_request('Missing required data.')\r\n try:\r\n return add_user(request)\r\n except:\r\n return bad_request(error_messages['user_exist'])", "def new_user():\n success = True\n try:\n usr = User(request.json['username'], request.json['email'])\n db.session.add(usr)\n db.session.commit()\n except:\n success = False\n return jsonify(success=success)", "def post(self):\n data = flask.request.json\n user_dao.create_user(data)\n return None, 201", "def add_user():\n input = request.get_json()\n\n if input == None:\n return jsonify({'error': 'Invalid POST request, no data'}), 400\n if not 'username' in input:\n return jsonify({'error': 'Invalid POST request, missing username'}), 400\n if not 'password' in input:\n return jsonify({'error': 'Invalid POST request, missing password'}), 400\n if not 'display_name' in input:\n return jsonify({'error': 'Invalid POST request, missing display_name'}), 400\n if not 'role' in input:\n return jsonify({'error': 'Invalid POST request, missing role'}), 400\n\n netAdminToolDB = app.config['DATABASE']\n id = netAdminToolDB.add_user(input['username'], input['password'],\n 
input['display_name'], input['role'])\n\n newUser = netAdminToolDB.get_user(id)\n newUserDict = dict(newUser)\n uri = url_for('get_user', user_id=newUser.id, _external=True)\n newUserDict['uri'] = uri\n\n return jsonify({'user': newUserDict}), 201", "def post_user():\n\tuser = User.add(request.json)\n\tif user == None:\n\t\tabort(404)\n\treturn jsonify({'user': user.output()})", "def add_user():\n if not request.json:\n abort(400)\n\n db_conn = sqlite3.connect(db_path)\n db = db_conn.cursor()\n\n username = request.json['username']\n public_key = request.json['public_key']\n\n try:\n db.execute(\"INSERT INTO users (username) VALUES (?)\", [username])\n db.execute(\"INSERT INTO public_keys (username, public_key, status) VALUES (?,?,?)\", [username, public_key, PK_STATUS_OK])\n db_conn.commit()\n db_conn.close()\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n return jsonify({'success':True})", "def addUser():\n if request.method == \"POST\":\n (result, status) = parse_user_data(request)\n return jsonify(result), status # HTTP Status Created [201]\n if request.method == \"DELETE\":\n (result, status) = delete_user_data(request)\n return jsonify(result), status # HTTP Status Created [201]", "def add_user():\n\n username = request.form.get('username')\n email = request.form.get('email')\n mobile = request.form.get('mobile')\n\n if not username or not email or not mobile:\n return jsonify({'message': 'Username or email or mobile not provided'}), 404\n\n token = request.headers.get('token')\n # Token Validation\n token_valid, response = is_token_valid(token)\n if not token_valid:\n return response\n token_username = response\n\n # Privilege handling\n if token_username != username:\n return jsonify({'message': \"Tried creating an account with mismatched token\"}), 404\n\n if username in Users:\n return jsonify({'message': 'User {} already exists'.format(username)}), 404\n\n Users[username] = {\n 'username': username,\n 'email': email,\n 'mobile': mobile,\n }\n\n Profiles[username] = {\n 'username': username,\n 'description': ''\n }\n\n return jsonify(Users[username]), 200", "def post(self):\n data = request.json\n return save_new_user(data)", "def register():\n insert_user(json_body())\n try:\n db.session.commit()\n except IntegrityError:\n raise exc.CouldNotCreateEntry()\n\n return jsonify({'message': 'Created user.'}), 200", "def post(self):\n data = UserRegister.parser.parse_args()\n\n if UserModel.find_by_id(data['username']):\n print(\"Failed\", file=sys.stderr)\n return {\n 'message':\n \"A user with name '{}' already exists.\"\n .format(data['username'])\n }, 400\n\n\n user = UserModel(**data) # data['username'], data['details'].......\n user.save_to_db()\n\n return {\"message\": \"User created successfully.\"}, 201", "def handle_add_user():\n new_user = User(first_name=request.form['first_name'], last_name=request.form['last_name'], image_url=request.form['image_url'])\n db.session.add(new_user)\n db.session.commit()\n\n return redirect('/')", "def post(self):\n data = request.json\n return save_new_user(data=data)", "def add_user(self, user):\n return self.ireporter_db.insert_data_users(\n user.get(\"firstname\"),\n user.get(\"lastname\"),\n user.get(\"othernames\"),\n user.get(\"username\"),\n user.get(\"email\"),\n user.get(\"phonenumber\"),\n user.get(\"is_admin\"),\n user.get(\"password\"),\n user.get(\"registered_on\")\n )", "def _add_user(data: dict) -> dict:\n user = create_user()\n name = []\n if 'first_name' in data:\n name.append(data['first_name'])\n if 
'middle_name' in data:\n name.append(data['middle_name'])\n if 'last_name' in data:\n name.append(data['last_name'])\n user['name'] = ' '.join(name)\n if 'role' in data:\n user['exp']['exp']['title'] = data['role']\n if 'affiliation' in data:\n user['abs'] = data['affiliation']\n user['exp']['exp']['company'] = data['affiliation']\n elif 'organization' in data:\n user['abs'] = data['organization']\n user['exp']['exp']['company'] = data['organization']\n phone = []\n if 'phone' in data:\n phone.append(data['phone'])\n if 'phone_ext' in data:\n phone.append(data['phone_ext'])\n user['contact']['phone'] = '-'.join(phone)\n user['contact']['email'] = data['email'] if 'email' in data else ''\n if 'degrees' in data:\n if not user.title:\n user['edu']['degree'] = data['degrees']\n if len(user['name']) < 0:\n user['name'] = user['contact']['email'] if len(user['contact']['email']) > 0 else 'Anonymous'\n return user", "def post_user_obj():\n dic = {}\n dic = request.get_json(silent=True)\n if dic is None:\n abort(400, \"Not a JSON\")\n if \"password\" not in dic.keys():\n abort(400, \"Missing password\")\n if \"email\" not in dic.keys():\n abort(400, \"Missing email\")\n new_user = user.User()\n for k, v in dic.items():\n setattr(new_user, k, v)\n new_user.save()\n return jsonify(new_user.to_dict()), 201", "def add_user(self, userdict):\n return self.post('users', userdict)", "def post(self):\n current_user = get_jwt_identity()\n if not current_user == 'admin':\n return jsonify({\"message\": \"You are not authorized\"}), 401\n \n data = request.get_json()\n name = data.get('name')\n user_name = data.get('user_name')\n password = data.get('password')\n role = data.get('role')\n\n valuser = validate_user_signup(name=name, user_name = user_name, password=password, role = role)\n\n if valuser:\n return valuser\n\n obj_users = Users(name, user_name, password, role)\n database.insert_table_users(obj_users)\n return jsonify({\"Success\": \"user has been added\"}), 201", "def add_new_user_to_db():\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n img_url = request.form['img_url']\n\n new_user = User(first_name=first_name,last_name=last_name, img_url=img_url)\n db.session.add(new_user)\n db.session.commit()\n\n return redirect('/users')", "def post(self):\n data = request.get_json()\n # validate received fileds\n fields_validate = ViewsValidation()\n fields = [\n 'firstname',\n 'lastname',\n 'email',\n 'phonenumber',\n 'username',\n 'othernames',\n 'password'\n ]\n missing_fields = fields_validate.missing_fields(fields, data)\n\n if not missing_fields: # filter missing fields\n user_entry = {\n \"firstname\": data[\"firstname\"],\n \"lastname\": data[\"lastname\"],\n \"email\": data[\"email\"],\n \"phonenumber\": data[\"phonenumber\"],\n \"username\": data[\"username\"],\n \"othernames\": data[\"othernames\"],\n \"password\": data['password'],\n \"createdBy\": len(['title'])\n }\n\n res = self._userz.create_user(user_entry)\n print(\"RES:::\", res)\n if res:\n if res[\"status\"] == 400:\n return res\n else:\n return {\n \"status\": 201,\n \"data\": [{\n \"id\": res[\"id\"],\n \"message\": \"user record has been created\"\n }]\n }, 201\n else:\n return {\n \"status\": 400,\n \"error\": \"Bad Request\"\n }, 400\n else:\n return {\n \"status\": 403,\n \"error\": \"Bad request: missing\"\n \" fileds {}\".format(missing_fields)\n }, 403", "def insert_user(self, userid, username, phone):\n try:\n query = \"insert into 
user(userId,userName,phone)values({},'{}','{}')\".format(userid, username, phone)\n print(query)\n cur = self.con.cursor()\n cur.execute(query)\n self.con.commit()\n logger.info(\"user saved to db\")\n except Exception as e:\n logger.error(\"Error occured at data insertion \", e)", "def create_user():\r\n data = request.get_json() or {}\r\n print(data)\r\n # some data checks\r\n if 'username' not in data or 'password' not in data:\r\n return bad_request('must include username and password fields')\r\n if User.query.filter_by(username=data['username']).first():\r\n return bad_request('please use a different username')\r\n user = User()\r\n # add user to database\r\n user.add_user(data)\r\n # check that the transaction was successful\r\n res = User.query.filter_by(username=data['username']).one_or_none()\r\n # return added user as query response\r\n if res:\r\n response = jsonify(res.to_dict())\r\n response.status_code = 201\r\n # else return error\r\n else:\r\n response.status_code = 403\r\n response.headers['Location'] = url_for('api.get_user', id=user.id)\r\n return response", "def save_user(self):\n args = parser.parse_args()\n data = {\n 'firstname': request.json.get('firstname').capitalize(),\n 'lastname': request.json.get('lastname').capitalize(),\n 'othernames': request.json.get('othernames', '').capitalize(),\n 'email': request.json.get('email').lower(),\n 'phoneNumber': request.json.get('phoneNumber'),\n 'username': request.json.get('username').lower(),\n 'registered': datetime.datetime.utcnow(),\n 'password': self.set_password(request.json.get('password')),\n 'isAdmin': self.isAdmin, 'public_id': self.public_id\n }\n userByEmail = self.get_user(data['email'])\n userByUsername = self.get_user(data['username'])\n if userByEmail is not None:\n return 'email exists'\n elif userByUsername is not None:\n return 'username exists'\n\n query = \"\"\"INSERT INTO users (firstname,lastname,othernames,email,phoneNumber,username,registered,password,isAdmin,public_id) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\"\"\n values = data['firstname'], data['lastname'], data['othernames'], data['email'], data['phoneNumber'], data[\n 'username'], data['registered'], data['password'], data['isAdmin'], data['public_id']\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return data", "def add_user():\n\n # get JSON from post request\n post_data = request.get_json()\n\n # enforce input\n if not post_data:\n response_object = {\n 'status': 'fail',\n 'message': 'Invalid payload'\n }\n return jsonify(response_object), 400\n\n # remember fields\n username = post_data.get('username')\n email = post_data.get('email')\n\n # check for duplicate users and add to db\n try:\n user = User.query.filter_by(email=email).first()\n if not user:\n db.session.add(User(username=username, email=email))\n db.session.commit()\n reponse_object = {\n 'status': 'success',\n 'message': f'{email} was added!'\n }\n return jsonify(reponse_object), 201\n else:\n response_object = {\n 'status': 'fail',\n 'message': 'Sorry, that email already exists'\n }\n return jsonify(response_object), 400\n\n # if missing fields\n except exc.IntegrityError as e:\n # rollback current transaction and close any subtransactions\n db.session.rollback()\n response_object = {\n 'status': 'fail',\n 'message': 'Invalid payload'\n }\n return jsonify(response_object), 400", "def fusion_api_add_user(self, body, api=None, headers=None):\n return self.user.create(body, api, headers)", "def create_user():\n usr = 
request.get_json()\n if not usr:\n abort(400, {'Not a JSON'})\n elif 'email' not in usr:\n abort(400, {'Missing email'})\n elif 'password' not in usr:\n abort(400, {'Missing password'})\n else:\n new_usr = User(**usr)\n storage.new(new_usr)\n storage.save()\n return jsonify(new_usr.to_dict()), 201" ]
[ "0.7702093", "0.75006545", "0.7271409", "0.7251211", "0.7179736", "0.71710926", "0.7168135", "0.71519035", "0.71314853", "0.7124831", "0.7101933", "0.709933", "0.70991194", "0.70895416", "0.7084391", "0.70836115", "0.7083596", "0.7064821", "0.70374316", "0.7032721", "0.7026689", "0.7024627", "0.701983", "0.69792074", "0.69608873", "0.6954362", "0.69408274", "0.6928677", "0.6888445", "0.6880322" ]
0.87101287
0
Repeats a command a specified number of times.
async def do(ctx, times : int, *, command): msg = copy.copy(ctx.message) msg.content = command for i in range(times): await bot.process_commands(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def repeat(times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)", "async def repeat(ctx, times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)", "async def repeat(ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)", "async def repeat(self,ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)", "def repeat(self, fn, *args, **kwargs):\n return repeat_n_times(self.n, fn, *args, **kwargs)", "def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return wrapper_repeat\n\n return decorator_repeat", "def repeat_string_n_times(string, count):\r\n return string * int(count)", "def repeatfunc(func, n, *args):\n return starmap(func, repeat(args, n))", "def repeat_count(instance, args):\r\n count = instance.repeat_count(args)\r\n return count", "def repeat(self, count):\n return self.Sequence((self,) * count)", "async def repeat(self, ctx, times : int, content : str):\n if times < 6:\n for i in range(times):\n await ctx.send(content)\n else:\n await ctx.send(\"Please don't get me banned by Discord! (Max 5)\")", "def cli(string,repeat,out):\n for x in range(repeat):\n click.echo(click.style('Hello {}'.format(string), fg='black',bg='white'),file=out)", "def twist(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(0, 50)\n time.sleep(.75)\n r.stop()\n time.sleep(.1)\n r.go(0, -50)\n time.sleep(.75)\n r.stop()\n time.sleep(.1)", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def shake(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)\n r.go(-25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def repeat(word, repetitions):\n return word * repetitions", "def Repeat(dataset, count=None):\n return dataset.repeat(count=count)", "def repeat(self, number_of_repeats):\n return \"G\" + str(number_of_repeats)", "def hello(count, name):\n for x in range(count):\n click.echo('Hello %s!' 
% name)", "async def async_send_command(self, command: Iterable[str], **kwargs: Any) -> None:\n num_repeats = kwargs[ATTR_NUM_REPEATS]\n\n for _ in range(num_repeats):\n for single_command in command:\n await self.coordinator.roku.remote(single_command)\n\n await self.coordinator.async_request_refresh()", "def repeat(x, repeats, axis=None):\r\n return RepeatOp(axis=axis)(x, repeats)", "def retry(times):\n return repeat_with_success_at_least(times, 1)", "def repeat(iterable, count=None):\n if count is None:\n while True:\n for sample in iterable:\n yield sample\n else:\n for i in range(count):\n for sample in iterable:\n yield sample", "def loop(func, n):\n for i in range(n):\n func()", "async def repeat(self, ctx, *, text):\n await ctx.send(text)", "def repeatfunc(cls, func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def repeatfunc(func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def repeatfunc(func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "async def ripgupta(self, ctx, count, *, message):\n int(count)\n gupta = 468209010978455552\n channel = 617525238392946699\n mloop = 0\n int(mloop) \n while mloop > count:\n await channel.send(\"{} {}\".format(gupta.mention, message))\n int(mloop)\n mloop = mloop + 1" ]
[ "0.738469", "0.7359903", "0.73264", "0.72749573", "0.67965984", "0.67318964", "0.66883427", "0.64287454", "0.6378672", "0.6340102", "0.62727064", "0.6257879", "0.6256501", "0.6239841", "0.6161488", "0.6144609", "0.60422397", "0.6017532", "0.6012069", "0.6001311", "0.5942651", "0.59071255", "0.58138865", "0.5803833", "0.5785928", "0.5771359", "0.5765533", "0.57610226", "0.57610226", "0.5742898" ]
0.7408826
0
Gives a URL to the current bot changelog.
async def changelog(): await bot.say('https://discord.gg/y2PcWMM')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_changelog_url(repository_url, branch):\n changelog_url = f\"{repository_url}/blob/{branch}/CHANGES.txt\"\n requests_var = requests.get(changelog_url, timeout=30)\n if requests_var.status_code != 200:\n raise RuntimeError(f\"Page at URL {changelog_url} not found\")\n\n return changelog_url", "async def changelog(self, ctx: commands.Context):\n status, commits = GitHub().repos.harkonenbade.yutu.commits.get(per_page=10)\n if status == 200:\n await ctx.send(content=\"```Changelog:\\n{}```\".format(\"\\n\".join([\"- {}\".format(c['commit']['message'])\n for c in commits])))\n else:\n await ctx.send(content=\"Error: Cannot reach github\")", "async def changelog(self, ctx, version: str.lower = \"\"):\n changelog = await Changelog.from_url(self.bot)\n version = version.lstrip(\"vV\") if version else changelog.latest_version.version\n\n try:\n index = [v.version for v in changelog.versions].index(version)\n except ValueError:\n return await ctx.send(\n embed=Embed(\n color=Color.red(),\n description=f\"The specified version `{version}` could not be found.\",\n )\n )\n\n paginator = EmbedPaginatorSession(ctx, *changelog.embeds)\n try:\n paginator.current = index\n await paginator.run()\n except asyncio.CancelledError:\n pass\n except Exception:\n try:\n await paginator.close()\n except Exception:\n pass\n logger.warning(\"Failed to display changelog.\", exc_info=True)\n await ctx.send(\n f\"View the changelog here: {changelog.CHANGELOG_URL}#v{version[::2]}\"\n )", "def GetChangeUrl(host, change):\n return 'https://%s/a/%s' % (host, _GetChangePath(change))", "def changelog(request, page_url=None):\n\n title = 'Global History'\n page_number = request.GET.get('page', 1)\n\n # If page_url is present then Changelog entries for that WikiPage should be\n # displayed.\n if page_url:\n try:\n # Trying to fetch wiki page given its page url.\n wikipage = WikiPage.objects.get(url=page_url)\n title = '\"%s\" History' % wikipage.title\n\n except exceptions.ObjectDoesNotExist:\n # Redirect response to wikipage edit view.\n return HttpResponseRedirect(\n urlresolvers.reverse('edit_page', args=[page_url]))\n\n else:\n # Filter ChangeLog entries for a WikiPage instance.\n changelogs = utils.get_view_paginator(\n Changelog, page_number, filters={'wikipage': wikipage})\n else:\n # Return all ChangeLog instances in djwiki.\n changelogs = utils.get_view_paginator(Changelog, page_number)\n\n return render(request, 'changelog.html', {'title': title,\n 'page_url': page_url,\n 'changelogs': changelogs})", "def getBuildbotURL():", "async def changelog(self, ctx, opt=None):\r\n if ctx.message.author == self.bot.user:\r\n return\r\n if opt is None:\r\n await self.bot.say('**Update: 9/5/16 // 1.0**\\n\\nServo is finally out of the beta stage!\\n\\nChanges & Fixes:\\n - The new format and tech information:\\n - Servo now uses the cogs function of discord.py, making everything organized, and tidy.\\n - Servo now supports discord.py plugins! Basically, these are plugins that can easily be added or removed, and Servo will still run if they don\\'t work.\\n - This allows easily editing the plugin, and reloading it, instead of restarting Servo completely!\\n - If you guys know Python, you can make plugins too! Ask me through a PM for information!\\n - I have recoded about 75% of Servo, allowing a slight performance boost.\\n - Throughout Servo\\'s code, I have added documentation notes, these are little tidbits of information to explain a specific function.\\n - Servo now has a new checks system. 
This allows checking for roles, IDs, and other information of users, to see who can use a command.\\n - This allows easier management of staff and owner commands.\\n - Command changes and notes:\\n - The `!name` command can now only be done by me.\\n - The `!chat` command has been removed, since it has never been used. The method of mentioning Servo still works with Cleverbot functionality.\\n - The `!throw` command now allows you to specify another user if you wish, and then Servo will attack that user.\\n - The `!help` command is still disabled until further notice.\\n - When you use any command that allows getting other user stuff, you can now get Servo\\'s info.\\n - Before, this wouldn\\'t work, because of the Cleverbot functionality, but it now has been fixed.\\n - New commands:\\n - `!ts` is a command based around Taylor Swift. The current option is `song`, which returns a random Taylor Swift song you should listen to.\\n - `!servo` is a command that allows you to see if Servo is running.\\n - Try in #general and then #laboratory-and-spam - you\\'ll get different results.')\r\n await self.bot.say('**Update: // 1.0.010210**\\n\\nCompared to the big update last time, this one is tiny.\\n\\n- Changes:\\n - The `extension_checker` plugin has been renamed to `extension_loader`.\\n - Fixed some wording in the documentation string throughout Servo\\'s code.\\n - Moved and renamed `cogs\\TaylorSwift_commands\\songs_command.py` to `cogs\\TaylorSwift_commands.py` \\n- Additions:\\n - The `!ts` command now has a new option: `album`. Usage: \"!ts album\".\\n - This will give the user a Taylor Swift album they should listen to.\\n - Added a `!packgen` command that will allow the user to make a Sims 4 pack.\\n - Usage: \"!packgen\", and then follow the prompts.\\n - Requested by Proculus#6163.\\n - Added a `!source` command that will give the user the link to the source code.\\n - More documentation strings throughout the source code.')\r\n elif opt == \"beta\".lower():\r\n await self.bot.say('**Update: 7/14/16 // 0.1.1** \\n\\n- Servo will no longer be rude if you make a mistake or lose in gambling. \\n - With gambling, you can no longer gamble a negative amount. \\n - Wouldn\\'t that just make you lose more anyway, or would they have to give you money? \\U0001F914 \\n\\n**Update: 7/14/16 // 0.1.1.102** \\n\\n- A new command `!randompack` (or it\\'s alias `!pack`) will let Servo give you a suggestion on what pack for The Sims 4 you should buy. \\n - Isn\\'t it nice to always have someone suggest something fun to buy? \\n - Especially when you\\'re bored with what you have now?\\n - Because you\\'re so selfish and aren\\'t thankful for what you have?\\n - Sorry, got too excited. \\n\\n**Update: 7/14/16 // 0.1.1.202** \\n\\n- Every command now has capitalized aliases, allowing the first letter to be capitalized, the merged words\\' first letter (like YouTube), or the whole command. \\n - You\\'re welcome you quick typers that speal most thimgs wronj, or the ones who can\\'t let LEt Go oF the ShiFT key aT THe riGHT TImE.\\n\\n**Update: 7/15/16 // 0.1.2.102** \\n\\n- Servo now logs the chat, and keeps track of who said it, and in which channel. \\n - We\\'re onto you guys, we\\'re watching. \\n-You can now use `!changelog` to look at the changelog. \\n - Wow, so many useless changes every time! \\n-Want to get hit with something? 
Use `!throw` and watch out!')\r\n await self.bot.say('**Update: 7/15/16 // 0.1.2.202** \\n\\n- Fixed a small issue that would cause Servo to create a blank log, alongside a complete one. \\n - We\\'re watching you guys still, don\\'t think we\\'ll ever stop. \\n\\n**Update: 7/15/16 // 0.1.2.301** \\n\\n- Fixed a formatting error in the changelog that would cause the headers to not be bolded. \\n\\n- Changed the formatting of the log file neames to M - D - Y, instead of D - M- Y. \\n\\n**Update: 7/15/16 // 0.1.2.402** \\n\\n - Added Backyard Stuff into the responses for `!pack`.')\r\n await self.bot.say('**Update: 7/15/16 // 0.1.2.506** \\n\\n- Staff can now use the new mod command, `!setgame` to choose which game Servo is currently playing. \\n - This can be cleared using `!setgame none` or `!setgame clear`. \\n\\n**Update: 7/15/16 // 0.1.2.604** \\n\\n- When staff members use the `!setgame` command, there will now be a reply indicating completion, and then will be deleted shortly after. \\n\\n**Update: 8/1/16 // 0.1.2.708** \\n\\n - Corrected the typo of \\'Luxary\\' to \\'Luxury\\' from the `!pack` command. \\n - The changelog order is now reversed, showing the more recent updates at the bottom. \\n - It makes sense because that\\'s what you would see at first, right? \\n\\n**Update: 8/26/16 // 0.1.3.102** \\n\\n- Added commands for getting info: `!infobot`, `!infoserver`, `!infouser`. \\n - `!infobot` displays information about Servo. \\n - `!infoserver` displays server information. \\n - `!infouser` displays info about the user who commits the command. \\n - None of those descriptions were useful. *shrugs* \\n- Added `!version` (or `!botversion`) to show Servos\\'s current version. \\n- Other things. \\n - Some secret things. \\n - Things I\\'ve been planning for a while. \\U0001F60F')\r\n await self.bot.say('**Update: 8/27/16 // 0.1.4.608** \\n\\n- There is a new negative response when answering yes or no questions with `!8ball`. \\n- A new **secret** command. *Hint, hint.* \\n - If you find this out, do not tell anybody. \\n - You will be punished and killed.\\n - Ahem, sorry. Got a little too carried away. \\n\\n**Update: 8/27/2016 // 0.1.5.809** \\n\\nPretty big update today:\\n\\n- Servo now notifies the server of username, and nickname changes.\\n- Member usernames and IDs are now being stored.\\n - Maybe for secret stuff? Who knows? \\U0001F60F \\n- Added `!choose`. (Usage like `!choose <option1>;<option2>`) This allows Servo to make hard decisions for you. \\n - There is no limit to the amount of choices. Make sure to follow the formatting perfectly.\\n- The `!help` command has been removed until further notice. \\n\\n**Update: 8/28/2016 // 0.1.5.909** \\n\\nSmall update this time: \\n\\n- When logging, Servo will now remove any emojis in the user\\'s name, or message. This will now reduce the amount of \"invalid character\" errors, and log more chat.\\n - Yeah, we are still watching you guys.\\n - There is still a bug that that will cause \"b\\'\" and \"\\'\" to wrap around the user\\'s name and message inside the logs, and the user list files. This will be solved soon.')\r\n await self.bot.say('\\n**Update: 9/1/16 // 0.1.6.101**\\n\\nA big update this time around:\\n\\nChanges:\\n - When using `!infouser`, you can now request the info of another user by mentioning them. (Example: `!infouser @Simalary®️ (Chris)#6397`)\\n - Staff can now use `!name <username>` to set Servo\\'s new username. 
(Example: `!name Servo (Alpha)`)\\n - If the username cannot be changed at that time, Servo will send an error saying so.\\n - The command will only run if Servo is running on my PC.\\n - Some secret stuff has been added. \\U0001F60F\\nFixes:\\n - Fixed some nice try mesages that were missing smirks. \\n\\n**Update: 9/2/16 // 0.1.6.306**\\n\\nChanges & Additions:\\n\\n - \\'City Life\\' has been added to a response for the `!pack` command.\\n - The response will say \\'@Servo (Beta)#6771, you should buy **City Life** when it comes out.\\', since the pack isn\\'t out yet. Duh.\\n - Added `!plumbob`. This will have Servo send you a cute gif of the plumbob from the Sims 4.\\n\\nFixes:\\n\\n - The logs will correctly show Servo\\'s messages, instead of mixing the channel and the message around.\\n - It\\'s not opposite day.\\n - Wait, does that mean it is opposite day?\\n - The `!choose` command is more specific now.\\n - The usage is `!choose Option 1; Option 2; Option 3`, until you run out of choices.\\n - **The formatting is specific**, use \"; \", not \";\" when seperating your choices.')\r\n await self.bot.say(' \\n**Update: 9/2/16 // 0.1.6.307**\\n\\nChanges:\\n - When getting another user\\'s info with `!infouser @USER#0000`, the response from Servo will now say you are getting **that** person\\'s info. \\n\\n**Update: 9/3/16 // 0.1.7.102**\\n\\nChanges & Additions:\\n - There is a new feature called **SimPoints**. Every user will have points, as of right now, everyone has 500.\\n - The default amount for when someone joines the server is 500.\\n - At the moment, there is no way to gain or lose points.\\n - To check your amount of points, you may use `!infouser` or `!points`.\\n - You can see another user\\'s points by using `!infouser @USER#0000` or `!points @USER#0000`.')\r\n else:\r\n await self.bot.say('{}, if you want the old beta changelog, type `!changelog beta`. 
If you want the current changelog, type `!changelog`.')", "def get_changelog(no):\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n\n return changelog[0:no]", "def GetChangePageUrl(host, change_number):\n return 'https://%s/#/c/%d/' % (host, change_number)", "def _get_changelog_contents(ctx: Context, version: str):\n return ctx.run(\n \"towncrier\",\n \"build\",\n \"--draft\",\n f\"--version={version}\",\n capture=True,\n ).stdout.decode()", "async def about(self, ctx):\n embed = Embed(color=self.bot.main_color, timestamp=datetime.utcnow())\n embed.set_author(\n name=\"Modmail - About\",\n icon_url=self.bot.user.avatar_url,\n url=\"https://discord.gg/F34cRU8\",\n )\n embed.set_thumbnail(url=self.bot.user.avatar_url)\n\n desc = \"This is an open source Discord bot that serves as a means for \"\n desc += \"members to easily communicate with server administrators in \"\n desc += \"an organised manner.\"\n embed.description = desc\n\n embed.add_field(name=\"Uptime\", value=self.bot.uptime)\n embed.add_field(name=\"Latency\", value=f\"{self.bot.latency * 1000:.2f} ms\")\n embed.add_field(name=\"Version\", value=f\"`{self.bot.version}`\")\n embed.add_field(name=\"Author\", value=\"[`kyb3r`](https://github.com/kyb3r)\")\n\n changelog = await Changelog.from_url(self.bot)\n latest = changelog.latest_version\n\n if parse_version(self.bot.version) < parse_version(latest.version):\n footer = f\"A newer version is available v{latest.version}\"\n else:\n footer = \"You are up to date with the latest version.\"\n\n embed.add_field(\n name=\"GitHub\", value=\"https://github.com/kyb3r/modmail\", inline=False\n )\n\n embed.add_field(\n name=\"Discord Server\", value=\"https://discord.gg/F34cRU8\", inline=False\n )\n\n embed.add_field(\n name=\"Donate\",\n value=\"Support this bot on [`Patreon`](https://patreon.com/kyber).\",\n )\n\n embed.set_footer(text=footer)\n await ctx.send(embed=embed)", "def default_changelog(release_link_format: str, breaking_change_token: str = \"BREAKING\"):\n return Changelog(\n header=\"\"\"# Changelog\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog] and this project adheres to\n[Semantic Versioning].\n\nTypes of changes are:\n* **Security** in case of vulnerabilities.\n* **Deprecated** for soon-to-be removed features.\n* **Added** for new features.\n* **Changed** for changes in existing functionality.\n* **Removed** for now removed features.\n* **Fixed** for any bug fixes.\"\"\",\n config=ChangelogConfig(\n release_link_format=release_link_format,\n breaking_change_token=breaking_change_token,\n ),\n releases=OrderedDict(\n {\n ReleaseTag(\"Unreleased\"): ReleaseSection(entries={}, timestamp=None),\n }\n ),\n links=OrderedDict(\n {\n \"Unreleased\": release_link_format.format(previous_tag=\"initial\", tag=\"HEAD\"),\n \"Keep a Changelog\": \"http://keepachangelog.com/en/1.0.0/\",\n \"Semantic Versioning\": \"http://semver.org/spec/v2.0.0.html\",\n },\n ),\n )", "def changelog(save, config, start=None, end=None):\n try:\n content = read_yml_file(config)\n except (yaml.YAMLError, FileNotFoundError):\n click.confirm(\"Config not found. 
Would you like to use default config?\", abort=True)\n default_config = Config.get_default_file_path()\n content = read_yml_file(default_config)\n\n config = Config(**content)\n changelog_config = config.changelog\n\n if start:\n changelog_config.start = start\n if end:\n changelog_config.end = end\n\n changelog = Changelog(config=changelog_config)\n\n if save:\n save_text_file(changelog.file_path, changelog.content)\n click.echo(f\"Changelog: \\\"{changelog.file_path}\\\"\")\n else:\n click.echo(\"Changelog:\")\n click.echo(\"\".join(changelog.content))\n\n click.echo(f\"Tag: \\\"{changelog.tag}\\\"\")\n click.echo(f\"Branch: \\\"{changelog.branch}\\\"\")", "def do_gitchangelog() -> None:\n # TODO: this app has lots of features for cleaning up comments\n command_name = \"gitchangelog\"\n check_command_exists(command_name)\n\n command_text = f\"{VENV_SHELL} {command_name}\".strip().replace(\" \", \" \")\n inform(command_text)\n command = shlex.split(command_text)\n with open(\"ChangeLog\", \"w+\") as change_log:\n result = execute_get_text(command, env=config_pythonpath()).replace(\"\\r\", \"\")\n change_log.write(result)", "def get_buildbot_url():\n return \"http://10.45.4.98:8001/\"", "def TrackerURL(issue):\n # make the server/project customizable?\n return 'http://code.google.com/p/lilypond/issues/detail?id=%s' % issue", "def get_url() -> str:\n parser = ArgumentParser()\n\n parser.add_argument('--url',\n type=str,\n help='Url to download log file')\n\n args = parser.parse_args()\n url = args.url\n return url", "def generate_changelog(ctx, base, sha, target):\n\n gh = ctx.obj.github\n\n default_destination = os.path.join(os.getcwd(), '{}-changelog.md'.format(sha))\n target = target or default_destination\n destination = os.path.abspath(target)\n\n utils.validate_directory_exists(os.path.abspath(os.path.join(destination, os.pardir)))\n utils.validate_file_does_not_exist(destination)\n\n changelog = _generate_changelog(gh=gh, base=base, sha=sha)\n\n with open(destination, 'w') as stream:\n rendered = changelog.render()\n stream.write(rendered)\n\n log.echo('Changelog written to: {}'.format(destination))\n\n return changelog", "def get_changelog(self, commit_sha):\n\n url = 'https://{}/{}/{}/' + commit_sha + '/CHANGELOG'\n url = url.format(HOST_GITHUB_RAW, self.repo, self.product)\n\n req = requests.get(url)\n lines = req.text\n\n first = self.latest_tags[self.num_comparisons - 1][VERS]\n last = self.latest_tags[self.num_comparisons - 2][VERS]\n flag = False\n\n log = ''\n for line in lines.splitlines():\n if first in line:\n flag = True\n if last in line:\n flag = False\n if flag:\n log += line + '\\n'\n return log", "def verbose_log_link(self) -> str:\n return pulumi.get(self, \"verbose_log_link\")", "def create_changelog (component):\n vprint (\"Creating ChangeLog entry for \" + component)\n\n old_tag = get_tag (old_comp_versions, 'ACE')\n\n # Generate changelogs per component\n path = get_path(component, \"ChangeLogs\", component + \"-\" + comp_versions[component + \"_version_\"])\n ex (\"cd $DOC_ROOT/ACE_TAO && git log \" + old_tag + \"..HEAD \" + component + \" > \" + path)\n\n return [path]", "def version(self, irc, msg, args):\n try:\n newest = utils.web.getUrl('http://supybot.sf.net/version.txt')\n newest ='The newest version available online is %s.'%newest.strip()\n except utils.web.Error, e:\n self.log.info('Couldn\\'t get website version: %s', e)\n newest = 'I couldn\\'t fetch the newest version ' \\\n 'from the Supybot website.'\n s = 'The current (running) version of this 
Supybot is %s. %s' % \\\n (conf.version, newest)\n irc.reply(s)", "def changelog_entries():\n changelog_entries = comments or []\n for o in options or self._DEFAULT_PORT_OPTIONS:\n changelog_entries.append(\"{keyword}: {option}\".format(keyword=mini_buildd.changes.Changes.Options.KEYWORD, option=o))\n return changelog_entries", "def get_absolute_url(self):\n return reverse('hist-detail', args=[str(self.id_historico)])", "def get_changelog(self, when=0, db=None):\r\n if not db:\r\n db = self.env.get_db_cnx()\r\n cursor = db.cursor()\r\n if when:\r\n cursor.execute(\"SELECT time,author,field,oldvalue,newvalue \"\r\n \"FROM ticket_change WHERE ticket=%s AND time=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'attachment',null,filename \"\r\n \"FROM attachment WHERE id=%s AND time=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'comment',null,description \"\r\n \"FROM attachment WHERE id=%s AND time=%s \"\r\n \"ORDER BY time\",\r\n (self.id, when, str(self.id), when, self.id, when))\r\n else:\r\n cursor.execute(\"SELECT time,author,field,oldvalue,newvalue \"\r\n \"FROM ticket_change WHERE ticket=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'attachment',null,filename \"\r\n \"FROM attachment WHERE id=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'comment',null,description \"\r\n \"FROM attachment WHERE id=%s \"\r\n \"ORDER BY time\", (self.id, str(self.id), self.id))\r\n log = []\r\n for t, author, field, oldvalue, newvalue in cursor:\r\n log.append((int(t), author, field, oldvalue or '', newvalue or ''))\r\n return log", "def history():\n return apology(\"TODO\")", "def history():\n return apology(\"TODO\")", "def history():\n return apology(\"TODO\")", "def _onChangeLog(self, event):\n cld = sc.getChangeLogDialog(self)\n cld.Show()", "def _get_pkg_changelog_contents(ctx: Context, version: str):\n changes = _get_changelog_contents(ctx, version)\n changes = \"\\n\".join(changes.split(\"\\n\")[2:])\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Removed\n -------\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Deprecated\n ----------\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Changed\n -------\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Fixed\n -----\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Added\n -----\n\n \"\"\"\n ),\n \"\",\n )\n return changes" ]
[ "0.7049577", "0.6941905", "0.66407025", "0.6581072", "0.6575993", "0.6410137", "0.6290972", "0.61633486", "0.61253595", "0.6025912", "0.5953337", "0.591455", "0.5777124", "0.57560915", "0.5750858", "0.57106954", "0.5691371", "0.5683107", "0.55855507", "0.55481666", "0.5528186", "0.5522323", "0.5520173", "0.5518586", "0.54889137", "0.54727435", "0.54727435", "0.54727435", "0.54617906", "0.5411126" ]
0.7766924
0
Download raw REPY data from Technion, convert it to almostunicode (numbers are reversed)
def repy_data(): REPY_URI = "http://ug.technion.ac.il/rep/REPFILE.zip" if prefs.options.usecache: t = open("REPFILE.zip") else: try: t = tempfile.TemporaryFile() t.write(urllib.urlopen(REPY_URI).read()) except: ttime.warning(_("Network download of REPFILE.zip failed, " \ "trying local")) try: t = open("REPFILE.zip") except: raise _("REP file download has failed") try: z = zipfile.ZipFile(t) except: raise _("REP file is not a valid zip file!") repy_data = '\n'.join([ bidi_flip(unicode(x).rstrip('\r')) for x in unicode(z.read('REPY'), 'cp862').split('\n') ]) z.close t.close return repy_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode(data): #@NoSelf", "def get_and_prepare_data_string():\n\n request = requests.get(\"https://pastebin.com/raw/a83ELw6K\")\n request.encoding = 'ISO-8859-1'\n\n return request.text", "def decode(self, encoded):", "def decode(self, s):", "def decode(self, s):", "def convert_txt_to_data():\n pass", "def download_to_utf_string(url: str) -> str:\n request = get(url)\n content = request.content.decode(\"utf-8\")\n return content", "def convertFromUnicode(content):\n return content", "def decode_bytes(data: bytearray) -> str:\n pattern = re.compile('\\r', re.UNICODE)\n res = data.decode('utf-8', 'ignore')\n res = pattern.sub('', res)\n return res", "def _decode_data(self, data):\r\n return data.decode('ISO-8859-1')", "def getraw_encoded(self):\n # update data model\n self.dataModel.setTestData( testData=self.srcEditor.text() )\n\n # return raw file\n return self.dataModel.getRaw()", "def decode_raw(data):\n return RawWire().decode(data)", "def decodeUtf8(self, arrayBuffer):", "def decodeUtf8(self, arrayBuffer):", "def decode_content(raw_content):\n return raw_content", "def read_file(input_file):\n\n\ttext = open(input_file)\n\traw = text.read()\n#\tdecoded = raw.decode('utf8').encode('ascii', 'replace')\n\tdecoded = raw.decode('utf8')\n\n\t#moves this through the html cleaner\n\ttext = plaintext(decoded)\n\n\treturn text", "def test_file_bin_read_unicode_as_bin(self):\n FileWriter(self.unicode_path).write_utf8(self.unicode_string)\n bin_data = FileReader(self.unicode_path).read_bin() #read unicode file as binary\n uni_text = bin_data.decode(\"utf-8\") #decode to utf-8\n self.assertEqual(uni_text, self.unicode_string)", "def decode(data):\n raise NotImplementedError", "def get_sprot_raw(id):\n return urllib.urlopen(\"http://www.uniprot.org/uniprot/%s.txt\" % id)", "def getData(self):\n return utf8decoder(self.data)[0]", "def decode_uref(as_bytes: typing.List[int]) -> str:\n raise NotImplementedError()", "def to_unicode(data):\n if isinstance(data, bytes):\n return data.decode('utf-8')\n else:\n return data", "def get_transmission_data(rvt_file, cleaned_str=False):\n if olefile.isOleFile(rvt_file):\n rvt_ole = olefile.OleFileIO(rvt_file)\n transmission_data = rvt_ole.openstream(\"TransmissionData\").read().decode(\"ascii\", \"ignore\")\n if cleaned_str:\n re_nullbytes = re.compile(r\"\\x00\")\n transmission_data = re.sub(re_nullbytes, \"\", transmission_data)\n return transmission_data\n else:\n print(\"file does not appear to be an ole file: {}\".format(rvt_file))", "def recodeToUtf8(data):\n try:\n data = data.decode('utf8').encode('utf8')\n return data\n except UnicodeDecodeError:\n encoding = chardet.detect(data)['encoding']\n logging.log(5, 'encoding should be %s' % encoding)\n if encoding == None:\n encoding = 'latin1'\n try:\n data = data.decode(encoding).encode('utf8')\n except UnicodeDecodeError:\n logging.warn('Error when decoding as %s' % encoding)\n data = data\n except LookupError:\n logging.warn('Unknown encoding when decoding as %s' % encoding)\n data = data\n\n return data\n\n return", "def download_data():\n # Download Unihan meta data for radical-stroke analysis\n os.system(' mkdir Unihan')\n os.system(' curl -O http://unicode.org/Public/UCD/latest/ucd/Unihan.zip')\n os.system(' apt-get -y install unzip')\n os.system(' unzip Unihan.zip -d Unihan/')\n os.system(' rm Unihan.zip')\n\n data_path = 'Unihan/Unihan_RadicalStrokeCounts.txt'\n assert(os.path.isfile(data_path))\n\n return data_path", "def get_raw_data(url):\n\n req = requests.get(url, stream=True)\n 
req.raw.decode_content = True\n return req.raw", "def _decode_binary(data):\n try:\n data = data.decode('utf-8')\n except UnicodeDecodeError: # pragma: no cover\n # for data written an upstream java App\n data = data.decode('latin-1')\n return data", "def test_decode():\n assert TextCleaner().transform([[\"tést\"]])[\"corpus\"][0] == \"test\"", "def decode(decode_format):\n return output_from_decode", "def loads(data):\n return Decoder().decode(data)" ]
[ "0.593756", "0.5877718", "0.57247186", "0.56543195", "0.56543195", "0.56461793", "0.56322694", "0.5608812", "0.5603942", "0.5595547", "0.5556599", "0.55428666", "0.5420726", "0.5420726", "0.53380746", "0.5324333", "0.5293145", "0.5286872", "0.52605474", "0.5250356", "0.5247406", "0.5235432", "0.5208806", "0.52081007", "0.5201296", "0.51902103", "0.5187298", "0.51849586", "0.5166036", "0.5164681" ]
0.6201089
0
convert time in seconds from 1998.1.1 to fractional year
def sectoFracYear(stime):
    ltime = convertCtimeToYdate(stime)
    atemp = re.split(':', ltime)
    year = int(atemp[0])
    ydate = int(atemp[1])
    hours = int(atemp[2])
    minutes = int(atemp[3])
    seconds = int(atemp[4])
    chk = 4.0 * int(0.25 * year)
    if chk == year:
        base = 366
    else:
        base = 365
    day = ydate + hours / 24.0 + minutes / 1440.0 + seconds / 86400.0
    return year + day / base
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def datetime_to_decimal_year(time):\n if not isinstance(time, datetime):\n raise TypeError(\"The input must be a datetime object.\")\n\n year_start = datetime(year=time.year, month=1, day=1)\n next_year_start = datetime(year=time.year+1, month=1, day=1)\n\n year_elapsed = (time - year_start).total_seconds()\n year_total = (next_year_start - year_start).total_seconds()\n\n return time.year + year_elapsed / year_total", "def _two_digit_year(t):\n dt = safe_fromtimestamp(t)\n year = dt.year\n if dt.month >= 7:\n year += 1\n return \"'%02d\" % (year % 100)", "def yearfrac(self) -> float:\n return (self.last_idx - self.first_idx).days / 365.25", "def normalise_two_digit_year(y):\r\n if y[0] == \"'\":\r\n y = y[1:]\r\n if int(y) < 39:\r\n return '%04d' % (int(y) + 2000)\r\n elif int(y) < 100:\r\n return '%04d' % (int(y) + 1900)\r\n else:\r\n return '%04d' % int(y[:4])", "def calculate_year_fraction(self, t):\n try:\n return t / 365.\n except (TypeError, AttributeError):\n return self.day_counter.year_fraction(self.curve_date, t)", "def _four_digit_year(t):\n dt = safe_fromtimestamp(t)\n year = dt.year\n if dt.month >= 7:\n year += 1\n return str(year)", "def year(self) -> int:\n if self.is_old_style:\n yy = int(self.split('/', 1)[1][0:2])\n else:\n yy = int(self[:2])\n if yy > 90:\n return 1900 + yy\n return 2000 + yy", "def yearlyDepreciation():\n return .10", "def days_to_years(datum):\n return datum/DAYS_PER_YEAR", "def make_year(res):\n return str(res['issued']['date-parts'][0][0])", "def decade(year):\r\n # get the first 3 digits of the year\r\n partial = (year[0]//10).item()\r\n # add a 0 to the end, return as decade\r\n return partial * 10", "def dt_to_dec(dt):\n year_start = datetime(dt.year, 1, 1)\n year_end = year_start.replace(year=dt.year+1)\n return dt.year + ((dt - year_start).total_seconds() / # seconds so far\n float((year_end - year_start).total_seconds())) # seconds in year", "def century(year):\r\n century = 0\r\n last_digit = year % 10\r\n if year >= 1 and last_digit == 0:\r\n century = year // 100 \r\n else:\r\n century = year // 100 + 1\r\n return century", "def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.SIDEREAL_YEAR) - (cls.solar_longitude(tee) / 360))", "def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.MEAN_SIDEREAL_YEAR) - (sidereal_solar_longitude(tee) / 360))", "def year(cls, year: typing.Union[int, str])->str:\n yearstr: str\n if isinstance(year, int):\n yearstr = str(year)\n else:\n yearstr = year\n return cls.DATE_AND_TIMES_SIGIL + yearstr + \"-01-01T00:00:00/9\"", "def date_year(date):\n return date.year", "def _convert_sfx_timestamp(ts: int) -> float:\n return float(ts) / 1000", "def number_times_per_year(f, f_unit):\n return round(f * converter['y'] / converter[f_unit])", "def _unit_yr(self):\n return ((self.time_base * 60.0) * 24.0) * 365.0", "def num2year(iyear):\n iyear = np.asarray([int(y) for y in iyear])\n fyear = lambda y, m, d: y + (m - 1)/12. 
+ d/365.25\n ymd = [int2ymd(iy) for iy in iyear]\n return [fyear(y,m,d) for y,m,d in ymd]", "def get_year(self):\n\n # First we get the first 8 bits stored in the yqr register\n year_bcd = self.__read_register(_REGISTER_YEAR)\n\n # Then we extract the digits and the tens\n tens = (year_bcd & 0xF0) >> 4 # 0xF0 = 0b11110000\n digit = (year_bcd & 0x0F) # 0x0F = 0b00001111\n\n # We return year value shifted in range [1970..2129]\n return (10 * (tens) + digit) + 1970", "def time_to_str(s):\n seconds_in_year = 31556925.9747 # a standard SI year\n orig_s = s\n years = int(s / (seconds_in_year))\n r = []\n if years:\n r.append ('%sY' % (years))\n s -= years * (seconds_in_year)\n months = int(s / (seconds_in_year/12.0))\n if months:\n r.append ('%sM' % (months))\n s -= months * (seconds_in_year/12.0)\n days = int(s / (60*60*24))\n if days:\n r.append ('%sd' % (days))\n s -= days * 60*60*24\n hours = int(s / (60*60))\n if hours:\n r.append ('%sh' % (hours))\n s -= hours * 60*60\n minutes = int(s / 60)\n if minutes:\n r.append ('%sm' % (minutes))\n s -= minutes * 60\n seconds = int(s)\n if seconds:\n r.append ('%.1fs' % (s))\n s -= seconds\n elif not r:\n mseconds = int(s*1000)\n if mseconds:\n r.append ('%sms' % (mseconds))\n s -= mseconds / 1000\n elif not r:\n useconds = int(s*1000000)\n if useconds:\n r.append ('%sus' % (useconds))\n s -= useconds / 1000000\n elif not r:\n nseconds = int(s*1000000000)\n if nseconds:\n r.append ('%sns' % (nseconds))\n s -= nseconds / 1000000000\n if not r:\n return '0'\n return ''.join(r)", "def date_second(date):\n return date.second", "def parse_year(year):\n\n return datetime.strptime(year, '%Y')", "def test_interval_to_seconds_with_years(self):\n self.assert_interval_to_seconds(0, \"0y\", \"0year\", \"0years\")\n self.assert_interval_to_seconds(31536000, \"1y\", \"1year\", \"1years\")\n self.assert_interval_to_seconds(5 * 31536000, \"5y\", \"5year\", \"5years\")\n self.assert_interval_to_seconds(\n 123 * 31536000, \"123y\", \"123year\", \"123years\")\n self.assert_interval_to_seconds(\n 2 * 31536000, \"02y\", \"02year\", \"02years\")", "def test_convert_date_to_year(self):\n # TODO there might be a more robust way to write this with try except statements.", "def YEAR(date):\n return _make_datetime(date).year", "def unit_yr(self):\n return ((self.time_base * 60.0) * 24.0) * 365.0", "def translate_years(val):\n if val.find(\"-\") > 0:\n tokens = re.findall(\"[0-9]+\", val)\n one = int(tokens[0])\n two = int(tokens[1])\n one = (1900 + one) if one > 50 else (2000 + one)\n two = (1900 + two) if two > 50 else (2000 + two)\n return range(one, two + 1)\n tokens = re.findall(\"[0-9]+\", val)\n return [int(f\"{'19' if int(t) > 50 else '20'}{t}\") for t in tokens]" ]
[ "0.6982566", "0.6425419", "0.63888425", "0.6374404", "0.63053405", "0.63038373", "0.62888265", "0.6287638", "0.625002", "0.6155581", "0.60713786", "0.6063753", "0.60450315", "0.5957644", "0.593662", "0.5935859", "0.59352255", "0.5934942", "0.59210765", "0.5891124", "0.5833839", "0.5822089", "0.5800763", "0.5778469", "0.577406", "0.5755295", "0.5754181", "0.57378376", "0.57293", "0.57044625" ]
0.7589334
0
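A minimal standard-library sketch of the same day-fraction arithmetic as `sectoFracYear` above, shown only as an illustration: it assumes the input counts seconds from 1998-01-01 00:00:00 UTC (Unix timestamp 883612800) and ignores leap seconds, whereas the original delegates that conversion to the external `convertCtimeToYdate` helper and may use a different epoch or time scale.

import time

def frac_year_from_seconds(stime, epoch=883612800):
    # Interpret stime as seconds since the assumed 1998-01-01 UTC epoch.
    t = time.gmtime(epoch + stime)
    year, ydate = t.tm_year, t.tm_yday
    base = 366 if year % 4 == 0 else 365  # same simple leap-year test as chk above
    day = ydate + t.tm_hour / 24.0 + t.tm_min / 1440.0 + t.tm_sec / 86400.0
    return year + day / base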
Initialize summary logger (if needed).
def initialize_summary(self):
    if self.need_logs:
        self.summary_writer = tf.summary.create_file_writer(self.log_dir)
        if self.verbose > 0:
            full_log_path = os.path.abspath(self.log_dir)
            print('Initialize logs, use: \ntensorboard --logdir={}'.format(full_log_path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, log_dir):\n self.writer = SummaryWriter(log_dir)", "def initialize_log():\n logging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S',\n )", "def _init_logging(self):\n # Setup logging variable\n self.log = logging.getLogger(\"collection-log\")\n self.log.setLevel(logging.INFO)\n self.formatter = logging.Formatter(\"%(asctime)s %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n\n # Log to stdout\n streamhandler = logging.StreamHandler()\n streamhandler.setLevel(logging.INFO)\n streamhandler.setFormatter(self.formatter)\n self.log.addHandler(streamhandler)", "def setup_log(self):\n self.logger, _ = get_logger(\"datatransform\")", "def _init():\n global logger\n logger = logging.getLogger(\"Log\")", "def initLogging(self):\n logging.basicConfig(level=self.loglevel, stream=sys.stderr)", "def __init__(self, log_dir):\n self.writer = tf.summary.FileWriter(log_dir)\n self.log_dict = {}", "def initLogger(self):\n loglevel = self.loglevels[self.loglevel]\n log_format = '%(asctime)s name=%(name)s loglevel=%(levelname)s message=%(message)s'\n logging.basicConfig(format=log_format,\n level=loglevel)\n \tmultiprocessing.log_to_stderr(loglevel)", "def setup_logging():\n log_format = '%(asctime)-15s %(levelname)s: %(message)s'\n logging.basicConfig(format=log_format, level=logging.DEBUG,\n filename='counting_consumer.out')", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def init_tensorboard_logger(self, **kwargs):\n self._tensorboard_logger = SummaryWriter(logdir=self.tensorboard_path, **kwargs)\n self.log.info(\"Tensorboard Logger initialized in: \" + str(self.tensorboard_path))\n return self._tensorboard_logger", "def __init__(\n self,\n log_dir: str,\n *args: Any,\n resume_run_id: Optional[str] = None,\n **kwargs: Any,\n ):\n self.writer = None\n if log_dir is not None and len(log_dir) > 0:\n self.writer = SummaryWriter(log_dir, *args, **kwargs)", "def __init__(self):\n if 'LOG_LEVEL' in os.environ:\n log_level = os.environ['LOG_LEVEL']\n else:\n log_level = 'INFO'\n\n logging.basicConfig(\n format='%(levelname)s:%(message)s',\n level=log_level)\n\n if 'TOLERATE' in os.environ:\n self.tolerance_name(os.environ['TOLERATE'])\n else:\n self.tolerance_name('Medium')\n\n self._max_severity_level = 0\n self._filename = None\n self._show_all = False\n\n if 'SHOW_ALL_VULNERABILITIES' in os.environ:\n self.show_all(True)", "def __init__(self, default_level=logging.WARNING):\n # All loggers are an attr of self for tab completion in iPython\n # (with . 
replaced with _)\n self._loggerdict = logging.Logger.manager.loggerDict\n for name, logger in self._loggerdict.iteritems():\n attr = name.replace('.', '_')\n setattr(self, attr, logger)\n\n if len(logging.root.handlers) == 0:\n # The default level is INFO\n fmt='%(levelname)-7s | %(asctime)-23s | %(name)-8s | %(message)s'\n logging.basicConfig(format=fmt, level=default_level)\n logging.StreamHandler.emit = self._emit_wrap", "def __init__(self):\n\n self._logger = logging.getLogger(__name__)", "def init_logger():\n logging.basicConfig(\n stream=sys.stdout,\n level=logging.INFO,\n format='%(asctime)s.%(msecs)03d %(name)s:%(lineno)d %(levelname)s %(message)s',\n datefmt='%m-%d %H:%M:%S',\n )", "def init_logger():\n lformat = \"%(asctime)s [%(levelname)-5.5s] [%(name)s] [%(threadName)-12.12s] %(message)s\"\n\n logging.basicConfig(\n level=logging.INFO,\n format=lformat,\n )\n\n file_handler = handlers.RotatingFileHandler(\n \"{0}/{1}.log\".format('.', 'meta-meta-hive'),\n maxBytes=(50*1024*1024),\n backupCount=7\n )\n file_handler.setFormatter(logging.Formatter(lformat))\n logging.getLogger().addHandler(file_handler)\n return", "def _initialize_logging(self):\n LOG_CFG = os.environ.get('LOG_CFG', 'LOCAL')\n configure_logging(LOG_CFG)\n self.logger = logging.getLogger(self.__class__.__name__)", "def __setup_logging(self):\n\n loglevel = logging.INFO\n if self.config[\"verbose\"]:\n loglevel = logging.DEBUG\n\n FORMAT = '[%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s'\n if self.config[\"log\"]:\n logging.basicConfig(format=FORMAT, level=loglevel, filename=self.config[\"log\"])\n else:\n logging.basicConfig(format=FORMAT, level=loglevel)", "def __init__(self):\n self.log = logging.getLogger()", "def _setup_logger():\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n\n log_handle = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"[%(levelname)s] (%(asctime)s) - %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\"\n )\n log_handle.setFormatter(formatter)\n root.addHandler(log_handle)\n\n logging.info(\"Initializing snakes\")", "def initialize_logger(self):\n\n # initialize logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # logger console handler\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(logging.Formatter(\"\"))\n logger.addHandler(console_handler)", "def __init__(self, log_dir):\n self.writer = tf.summary.FileWriter(log_dir)", "def init_logging():\n global logger\n logger = logging.getLogger('autogen_quartus')", "def initialize(self, context):\n self.logger = None\n pass", "def __init__(self, log_dir):\n self.writer = tf.summary.create_file_writer(log_dir)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.log = logging.getLogger(logger_name(__name__))", "def setUp(self):\n\n self.logger_stats = DataScreen()", "def __init__(self, log_dir):\n self.writer = tf.summary.FileWriter(log_dir)\n self.histograms = {}", "def __init__(self, log_dir, comment=None):\n self.log_dir = log_dir\n self.comment = comment\n if comment is None:\n self.writer = SummaryWriter(log_dir)\n else:\n self.writer = SummaryWriter(log_dir, comment=comment)" ]
[ "0.72479147", "0.719565", "0.6993717", "0.684605", "0.67818", "0.6757587", "0.6726005", "0.67242354", "0.67202115", "0.6719694", "0.66944724", "0.66559464", "0.66170186", "0.6603188", "0.6601731", "0.65876406", "0.6564645", "0.6533853", "0.6529449", "0.65213436", "0.64860255", "0.6468369", "0.64643186", "0.64592975", "0.6459184", "0.64505374", "0.64453876", "0.64145607", "0.6406305", "0.6400327" ]
0.7754888
0
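A brief sketch of how the writer created by `initialize_summary` above is typically used with the TensorFlow 2 summary API; the training loop and metric below are placeholders, and "./logs" stands in for `self.log_dir`.

import tensorflow as tf

writer = tf.summary.create_file_writer("./logs")    # what initialize_summary stores in self.summary_writer
for step in range(100):
    loss = 1.0 / (step + 1)                         # placeholder metric
    with writer.as_default():
        tf.summary.scalar("loss", loss, step=step)  # inspect with: tensorboard --logdir=./logs
writer.flush()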
Get operation profiling info.
def get_profile_op_info():
    profiler_dir = get_profiler_dir(request)
    train_id = get_train_id(request)
    if not profiler_dir or not train_id:
        raise ParamValueError("No profiler_dir or train_id.")

    search_condition = request.stream.read()
    try:
        search_condition = json.loads(search_condition if search_condition else "{}")
    except Exception:
        raise ParamValueError("Json data parse failed.")
    validate_condition(search_condition)

    device_id = search_condition.get("device_id", "0")
    profiler_dir_abs = os.path.join(settings.SUMMARY_BASE_DIR, train_id, profiler_dir)
    try:
        profiler_dir_abs = validate_and_normalize_path(profiler_dir_abs, "profiler")
    except ValidationError:
        raise ParamValueError("Invalid profiler dir")

    op_type = search_condition.get("op_type")
    analyser = AnalyserFactory.instance().get_analyser(
        op_type, profiler_dir_abs, device_id
    )
    op_info = analyser.query(search_condition)
    return jsonify(op_info)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_profile_stats():\n return p_stats", "def print_performance_info(self):\n pass", "def map_profile_info(profile):\n result = map(\n lambda p: {\n 'callcount': p.callcount,\n 'time': p.totaltime,\n 'name': p.code if isinstance(p.code, str) else p.code.co_name,\n 'file': None if isinstance(p.code, str) else p.code.co_filename},\n profile.getstats())\n return result", "def get_statistics(self):\n\n return (self.func_id, self.instruction_count)", "def getprofile(): # real signature unknown; restored from __doc__\n pass", "def profile(self):\n return self.__profile", "def profiler(self):\r\n\r\n class Task(object):\r\n \"Private class to nicely wrap up the profile data\"\r\n def __init__(self, block, addr):\r\n self.block = block\r\n self.addr = addr\r\n self.name = None\r\n def tidy(self, sym):\r\n self.name = sym.varfind(self.addr).name\r\n self.CPU_FRAC = sym.constfind(\"$profiler.CPU_FRACTION_FIELD\").value\r\n def __repr__(self):\r\n if self.name is None:\r\n raise Exception(\"Need to call the tidy method before using\")\r\n return \"%-50s - %2.1f %%\" % (self.name, self.block[self.CPU_FRAC]/1000)\r\n\r\n\r\n # get the head of the list and a couple of constants\r\n head = self._core.sym.varfind(\"$profiler.last_addr\").addr\r\n NULL = self._core.sym.constfind(\"$profiler.LAST_ENTRY\").value\r\n SIZE = self._core.sym.constfind(\"$profiler.STRUC_SIZE\").value\r\n NEXT = self._core.sym.constfind(\"$profiler.NEXT_ADDR_FIELD\").value\r\n\r\n # get the first address\r\n curr = self._core.dm[head]\r\n\r\n # read all the structures off the chip as fast as we can\r\n tasks = []\r\n while curr != NULL:\r\n block = self._core.dm[curr:(curr+SIZE)]\r\n tasks.append(self.Task(block, curr))\r\n curr = block[NEXT]\r\n\r\n # now fill in the other bits\r\n for t in tasks:\r\n t.tidy(self._core.sym)\r\n\r\n # finally return\r\n return tasks", "def get_operation_data(self):\n op_data = {}\n try:\n op_data = self.factory.odoo_con.get_op_data(self.user_id, self.task_id)\n\n\n # if op_data:\n # self.state = \"scan_op\"\n except Exception, e:\n expt_str = e.message\n self._snd(expt_str)\n return op_data", "def info(self):\n return self.current_run.info", "def profiler():\r\n KEY = 'web2py_profiler_size'\r\n filename = global_settings.cmd_options.profiler_filename\r\n data = 'profiler disabled'\r\n if filename:\r\n if KEY in request.cookies:\r\n size = int(request.cookies[KEY].value)\r\n else:\r\n size = 0\r\n if os.path.exists(filename):\r\n data = read_file('profiler.log','rb')\r\n if size<len(data): \r\n data = data[size:]\r\n else: \r\n size=0\r\n size += len(data)\r\n response.cookies[KEY] = size\r\n return data", "def profile(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"profile\")", "def get_info(self): \n return {\n \"ident\": self.ident,\n \"interval\": self._interval,\n \"exception\": self._exception,\n \"execute\": self._execute,\n \"args\": self._args,\n \"kwargs\": self._kwargs}", "def _profile_module(self):\n with open(self._run_object, 'r') as srcfile:\n src_code = srcfile.read()\n code = compile(src_code, self._run_object, 'exec')\n try:\n with _CodeHeatmapCalculator() as prof:\n exec(code, self._globs, None)\n except SystemExit:\n pass\n\n heatmaps = []\n for filename, heatmap in prof.heatmap.items():\n if os.path.isfile(filename):\n heatmaps.append(\n self._format_heatmap(\n filename, heatmap, prof.execution_count[filename]))\n\n run_time = sum(heatmap['runTime'] for heatmap in heatmaps)\n return {\n 'objectName': self._run_object,\n 'runTime': run_time,\n 
'heatmaps': heatmaps\n }", "def rpc_info():", "def getStati(self):\n raise \"not implemented\"", "def profile(self):\n return self._profile", "def getOperationCount():\n return _operationCount", "def get_proc_stats(proc):\n file_size = os.path.getsize(proc['filename'])\n return {\n 'file_size': file_size,\n 'formatted_file_size': size(file_size),\n 'started_at': time.strftime(\n \"%H:%M\", time.localtime(proc['time'])),\n 'recording_time': str(\n timedelta(seconds=int(time.time()) - proc['time']))\n }", "def GetCpuTimestamp(self):\n return {'TotalTime': time.time()}", "def _profile_package(self):\n with _CodeHeatmapCalculator() as prof:\n try:\n runpy.run_path(self._run_object, run_name='__main__')\n except SystemExit:\n pass\n\n heatmaps = []\n for filename, heatmap in prof.heatmap.items():\n if os.path.isfile(filename):\n heatmaps.append(\n self._format_heatmap(\n filename, heatmap, prof.execution_count[filename]))\n\n run_time = sum(heatmap['runTime'] for heatmap in heatmaps)\n return {\n 'objectName': self._run_object,\n 'runTime': run_time,\n 'heatmaps': heatmaps\n }", "def profile(self, layer, num_iter=50, num_warmup=10, direction='forward'):\n return TimeMeasure()", "def get_info(self):\n pass", "def get_info(self):\n pass", "def loading_info(self):\n rv = \"\"\n for thing in ['ProfileName', 'n_subbands', 'subbands_width',\n 'n_dumps', 'dump_length', 'format', 'sourceRawProfileName',\n 'sourceProfileName', 'processingType', 'processingFileName']:\n rv += thing + ': ' + str(getattr(self,thing)) + '\\n'\n return rv", "def stats(self):\n return self.rpc.call(MsfRpcMethod.CoreModuleStats)", "def _base_stats(self):\n usage = resource.getrusage(resource.RUSAGE_SELF)\n return {'host': self.application.host,\n 'port': self.application.port,\n 'requests': self.application.counters,\n 'timestamp': int(time.time()),\n 'block': {'input': usage.ru_inblock,\n 'output': usage.ru_oublock},\n 'context_switches': usage.ru_nvcsw + usage.ru_nivcsw,\n 'cpu_time': {'user': usage.ru_utime,\n 'system': usage.ru_stime},\n 'memory_usage': usage.ru_maxrss,\n 'page_faults': {'minor': usage.ru_minflt,\n 'major': usage.ru_majflt},\n 'page_size': resource.getpagesize(),\n 'signals_received': usage.ru_nsignals,\n 'swap_outs': usage.ru_nswap}", "def operation_counts(self) -> Dict[int, Dict[str, int]]:\n return self._operation_counts", "def get_telemetry ():\n telemetry = OrderedDict()\n\n telemetry[\"ip_addr\"] = socket.gethostbyname(socket.gethostname())\n\n telemetry[\"mem_free\"] = psutil.virtual_memory().free\n\n telemetry[\"cpu_num\"] = psutil.NUM_CPUS\n\n x = psutil.cpu_times()\n telemetry[\"cpu_times\"] = OrderedDict([ (\"user\", x.user), (\"system\", x.system), (\"idle\", x.idle) ])\n\n x = psutil.disk_usage(\"/tmp\")\n telemetry[\"disk_usage\"] = OrderedDict([ (\"free\", x.free), (\"percent\", x.percent) ])\n\n x = psutil.disk_io_counters()\n telemetry[\"disk_io\"] = OrderedDict([ (\"read_count\", x.read_count), (\"write_count\", x.write_count), (\"read_bytes\", x.read_bytes), (\"write_bytes\", x.write_bytes), (\"read_time\", x.read_time), (\"write_time\", x.write_time) ])\n\n x = psutil.network_io_counters()\n telemetry[\"network_io\"] = OrderedDict([ (\"bytes_sent\", x.bytes_sent), (\"bytes_recv\", x.bytes_recv), (\"packets_sent\", x.packets_sent), (\"packets_recv\", x.packets_recv), (\"errin\", x.errin), (\"errout\", x.errout), (\"dropin\", x.dropin), (\"dropout\", x.dropout) ])\n\n return telemetry", "def profile_module(self):\n return base_profiler.run_in_separate_process(self._profile_module)", 
"async def sysinfo(self, ctx: Context):\n\t\tstart = time.perf_counter()\n\t\tend = time.perf_counter()\n\t\tduration = (end - start) * 1000\n\t\tcpuavg = psutil.cpu_percent(interval=None)\n\t\tmem = psutil.virtual_memory()[2]\n\t\tdurround = round(duration, 3)\n\t\tosun = os.uname()\n\t\tawait self.send(f\"System Info | CPU: {cpuavg}% | RAM: {mem}% | Latency: {durround * 1000}ms | OS: {sys.platform}\", whisper=[ctx.author.id])" ]
[ "0.66240704", "0.62480927", "0.6232298", "0.6200571", "0.61967117", "0.605343", "0.6047269", "0.58772755", "0.57756025", "0.57734996", "0.57548666", "0.57354444", "0.57291603", "0.5698983", "0.5678599", "0.5661551", "0.5656184", "0.5646105", "0.56389225", "0.5613454", "0.56043077", "0.55990905", "0.55990905", "0.55970556", "0.5596737", "0.5588261", "0.55848235", "0.55828065", "0.5581523", "0.557509" ]
0.7344386
0
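The view above reads identifiers from the request, parses the request body as a JSON search condition, and returns the analyser result as JSON. A hypothetical client-side call is sketched below; the URL, route, query-parameter names, and field values are illustrative assumptions (they are resolved by `get_profiler_dir`/`get_train_id` and the analyser elsewhere in the service, not in the snippet).

import requests

# Hypothetical endpoint, parameter names, and values -- adjust to the real route.
resp = requests.post(
    "http://localhost:8080/profile/op-info",
    params={"train_id": "./run1", "profile": "profiler"},
    json={"op_type": "aicore_type", "device_id": "0"},  # parsed into search_condition
)
print(resp.json())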
Get profile device list.
def get_profile_device_list():
    profiler_dir = get_profiler_dir(request)
    train_id = get_train_id(request)
    if not profiler_dir or not train_id:
        raise ParamValueError("No profiler_dir or train_id.")

    profiler_dir_abs = os.path.join(settings.SUMMARY_BASE_DIR, train_id, profiler_dir)
    try:
        profiler_dir_abs = validate_and_normalize_path(profiler_dir_abs, "profiler")
    except ValidationError:
        raise ParamValueError("Invalid profiler dir")

    device_list = analyse_device_list_from_profiler_dir(profiler_dir_abs)
    return jsonify(device_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_devices(self):\n response = self.oauth.get(url=f'{self.base_url}/json/devices/list')\n\n result = response.json()['device']\n for device in result:\n print(device)", "def list_devices(self):\n return [x for x in self.devices.keys()]", "def get_devices(self):\n return self.api_request('GET', self.url + '/device', {})", "def get_profiles(self):\n profiles = [['Profile name', 'GUID']]\n r = self.system_cursor.execute('{Call wtGetProfileList()}')\n for row in r.fetchall():\n profiles.append([row.PROFILE_NAME, row.PROFILE_GUID])\n return profiles", "def load_devices(self):\n response = self.oauth.get(url=f'{self.base_url}/json/devices/list')\n\n result = response.json()['device']\n return [(device['id'], device['name'], device['state']) for device in result]", "def list_devices():\r\n DeviceManagerCLI.BuildDeviceList()\r\n return DeviceManagerCLI.GetDeviceList()", "def get_devices():\n devices = []\n for device_id in range(pm.lib.Pm_CountDevices()):\n devices.append(DeviceInfo(device_id))\n\n return devices", "def list_devices():\n return _lib.SeaTeaseAPI().list_devices()", "def do_list(self, _):\n devices = []\n for source in self._target.devices:\n devices.append({\n 'name': source.device['name'],\n 'path': source.device['path'],\n })\n return devices", "def get_devices(self):\n return get_devices(self.api_key)", "def profile_devices(request, pk):\n context = {}\n profile = get_object_or_404(ConfigurationProfile, pk=pk)\n to_remove = InstallationRecord.objects.filter(profile=profile, device__pending__in=[profile], active=True,\n version=\"RM\")\n pending = Laptop.objects.filter(pending__in=[profile]).exclude(install_records__in=to_remove)\n installed = InstallationRecord.objects.filter(profile=profile, device__installed__in=[profile], active=True)\\\n .exclude(version=\"RM\")\n pending_removal = []\n for record in to_remove:\n pending_removal.append(record.device)\n context['resource'] = profile\n context['resource_type'] = 'Profile'\n context['pending'] = pending\n context['pending_removal'] = pending_removal\n context['installed'] = installed\n context['today'] = timezone.now()\n context['expiration_warning'] = timezone.now() + timezone.timedelta(days=30)\n return render(request, 'mdm/device_list.html', context)", "def list_devices(arn=None, nextToken=None):\n pass", "def get_devices(self):\n devices = self.get(\"event/device\")", "def GetAllDevices(self):\n\n return list(self.YieldAllDevices())", "def get_devices(self):\n devices = []\n for i in self.devices:\n devices.append(self.devices[i])\n\n return devices", "def list_devices(cls, filters={}):\n return cls.dbdriver.list_devices(filters)", "def get_list_of_devices(self, give_json=False):\n\n url = Constants.BASE_URL + 'users/devices'\n response = requests.get(url=url, params={'key': self.user_access_token})\n\n if give_json:\n return response.json()\n else:\n return response.text", "def getDeviceList(self):\n return defer.succeed(self.discovered)", "def _internal_get_upnp_device_list(self) -> List[dict]:\n\n upnp_device_list = []\n\n for device in self._all_devices.values():\n if device.device_type == \"network/upnp\":\n upnp_device_list.append(device)\n\n return upnp_device_list", "def ret_device_list():\n token = get_auth_token() # Get Token\n url = \"https://sandboxdnac.cisco.com/api/v1/network-device\"\n hdr = {'x-auth-token': token, 'content-type' : 'application/json'}\n resp = requests.get(url, headers=hdr) # Make the Get Request\n device_list = resp.json()\n return device_list", "def get_devices():\n names = 
devices.list()\n if request.args.get('full') is not None:\n data = {d: devices.show(d) for d in names}\n else:\n data = names\n return jsonify({'devices': data})", "def list_devices(self):\n xml = str(self._server.listDevices())\n return self._parse_cabling_xml(xml)", "def getDeviceList(self):\r\n\r\n self._logger.debug(\"In getDeviceList()...\")\r\n\r\n # update the security token if needed \r\n if self._checkToken():\r\n\r\n response = self._callAPI(_API_GET_DEVICE_LIST, useSession=True)\r\n\r\n if response is not None:\r\n\r\n deviceInfo = response.json()\r\n \r\n if response.status_code == 200 and \"items\" in deviceInfo:\r\n\r\n deviceList = []\r\n\r\n for dev in deviceInfo[\"items\"]:\r\n\r\n # pull out common attributes\r\n deviceID = dev[\"serial_number\"]\r\n deviceType = dev[\"device_family\"]\r\n description = dev.get(\"name\", deviceType + \" \" + deviceID[-4:])\r\n\r\n # uncomment the next line to inspect the devices returned from the MyQ service\r\n self._logger.debug(\"Device Found - Device ID: %s, Device Type: %s, Description: %s\", deviceID, deviceType, description)\r\n\r\n # add device to the list with properties based on type\r\n if deviceType == API_DEVICE_TYPE_GATEWAY:\r\n\r\n # get gateway attributes\r\n online = dev[\"state\"][\"online\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add gateway device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"description\": description,\r\n \"online\": online,\r\n \"last_updated\": lastUpdated\r\n })\r\n\r\n elif deviceType == API_DEVICE_TYPE_OPENER:\r\n \r\n # get the door attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"door_state\"]\r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add garage door opener device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n elif deviceType == API_DEVICE_TYPE_LAMP:\r\n\r\n # get the lamp attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"lamp_state\"] \r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add lamp device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n return deviceList\r\n \r\n elif response.status_code == 401:\r\n \r\n self._logger.error(\"There was an authentication error with the MyQ account: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n \r\n self._logger.error(\"Error retrieving device list: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n # Error logged in _callAPI function\r\n return None\r\n\r\n else:\r\n # Check token failed - wait and see if next call successful\r\n return None", "def devices_list_view(request):\n return read_json(request.registry.settings['devices_path'], [])", "def list_profiles(self, params):\n return self.profiles", "async def find_devices() -> List[DeviceInfo]:\n return await Discovery.search_devices()", "def device_list(self, plant_id):\n return self.plant_info(plant_id)['deviceList']", "def devices(self):\n return self.enumerate_devices()", "def 
get_all_devices(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_GetAllDevices', self.handle))", "def devices(self):\n return list(self._device_types)" ]
[ "0.749383", "0.7381647", "0.72927547", "0.7277462", "0.71753764", "0.7086246", "0.70075804", "0.6882325", "0.68772906", "0.68372095", "0.68343604", "0.680703", "0.67837983", "0.6698722", "0.66796744", "0.6654076", "0.66497344", "0.6642055", "0.66392237", "0.6628788", "0.66121614", "0.65615726", "0.6560672", "0.653366", "0.64994776", "0.6495782", "0.6463885", "0.6450247", "0.643703", "0.640489" ]
0.82288843
0
Returns the image without atomic prefixes used to map to skopeo args.
def image(self):
    image = self._image
    for remove in ('oci:', 'http:', 'https:'):
        if image.startswith(remove):
            image = image.replace(remove, '')
    return image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def no_bin(image, *args, **kwargs):\n return image", "def no_img(texto):\n return sub_no_img.sub(\"\", texto)", "def unprocess(image):\n return image + MEAN_PIXEL", "def image(images):\n return images[0]", "def image_cleaner(self) -> Optional[pulumi.Input['ManagedClusterSecurityProfileImageCleanerArgs']]:\n return pulumi.get(self, \"image_cleaner\")", "def _getFullPath(self):\n\n if not self.plateifu:\n return None\n\n plate, ifu = self.plateifu.split('-')\n dir3d = self._get_image_dir()\n\n name = 'mangaimage'\n\n return super(Image, self)._getFullPath(name, ifu=ifu, dir3d=dir3d,\n drpver=self._drpver, plate=plate)", "def get_squeeze_image(self):\n return self.squeeze_image", "def base_image(self) -> Optional[pulumi.Input['BasisArgs']]:\n return pulumi.get(self, \"base_image\")", "def image(self):\n return self.any_image(-1)", "def delete_image_builder(Name=None):\n pass", "def small_image(self) -> Optional[str]:\n return pulumi.get(self, \"small_image\")", "def strip(self):\n result = library.MagickStripImage(self.wand)\n if not result:\n self.raise_exception()", "def wiki_image(pagetext):\n images = [i for i in pagetext.images if i not in EXCLUDED_IMAGES]\n if len(images) > 0:\n return images[0]\n else:\n return ''", "def get_normal_image(image_path):\n resized_images = slice_and_resize(image_path)\n\n normal_full_img = join_images_horizontally(resized_images)\n\n folder = \"static/images/panorama\"\n\n name = next(tempfile._get_candidate_names())\n normal_path = \"%s/%s_resized.png\" % (folder, name)\n normal_full_img.save(normal_path)\n\n return normal_path", "def GET_upload_sr_img(self, *a, **kw):\r\n return \"nothing to see here.\"", "def name(self, strippath=False):\n return _image.image_name(self, strippath)", "def stop_image_builder(Name=None):\n pass", "def _DetermineImageFromArgs(self, args):\n if args.tag:\n if (properties.VALUES.builds.check_tag.GetBool() and\n 'gcr.io/' not in args.tag):\n raise c_exceptions.InvalidArgumentException(\n '--tag',\n 'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')\n return args.tag\n\n elif args.image:\n if (properties.VALUES.builds.check_tag.GetBool() and\n 'gcr.io/' not in args.image):\n raise c_exceptions.InvalidArgumentException(\n '--image',\n 'Image value must be in the gcr.io/* or *.gcr.io/* namespace.')\n return args.image\n\n else: # Default tag\n if args.app_name:\n default_name = args.app_name\n elif os.path.isdir(args.source): # I.e., the source is not a tarball\n default_name = os.path.basename(os.path.abspath(args.source))\n else:\n raise c_exceptions.OneOfArgumentsRequiredException(\n ['--app-name', '--tag'],\n 'Cannot resolve default container image. Provide an app name with '\n '--app-name to use as the container image, or provide a full '\n 'tag using --tag.')\n\n if args.app_version:\n default_tag = args.app_version\n elif git.IsGithubRepository(\n args.source) and not git.HasPendingChanges(args.source):\n default_tag = git.GetGitHeadRevision(args.source)\n if not default_tag:\n raise c_exceptions.OneOfArgumentsRequiredException(\n ['--app-version', '--tag'],\n 'Cannot resolve default container tag using the Git commit SHA. '\n 'Provide an app version with --app-version to use as the '\n 'container tag, or provide a full tag using --tag.')\n else:\n raise c_exceptions.OneOfArgumentsRequiredException(\n ['--app-version', '--tag'],\n 'Cannot resolve default container tag. 
'\n 'Provide an app version with --app-version to use as the '\n 'container tag, or provide a full tag using --tag.')\n\n return 'gcr.io/$PROJECT_ID/{name}:{tag}'.format(\n name=default_name, tag=default_tag)", "def image(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"image\")", "def Root(self, *args):\n return _BRepAlgo.BRepAlgo_Image_Root(self, *args)", "def custom_image(self) -> Optional[str]:\n return pulumi.get(self, \"custom_image\")", "def s2eexi_image(node, key_image, _paren_if_fun, _paren_if_app):\n return quantified_exp_image(node, key_image, open_close=(\"[\", \"]\"))", "def standard_image(img_name):\n clout = CommandLine(\n \"which afni\",\n ignore_exception=True,\n resource_monitor=False,\n terminal_output=\"allatonce\",\n ).run()\n if clout.runtime.returncode != 0:\n return None\n\n out = clout.runtime.stdout\n basedir = os.path.split(out)[0]\n return os.path.join(basedir, img_name)", "def s2euni_image(node, key_image, _paren_if_fun, _paren_if_app):\n return quantified_exp_image(node, key_image, open_close=(\"{\", \"}\"))", "def create_molns_image(self):\n file_to_remove = None\n try:\n dockerfile, file_to_remove = self._create_dockerfile(installSoftware.InstallSW.get_command_list())\n image_id = self.docker.build_image(dockerfile)\n return image_id\n except Exception as e:\n logging.exception(e)\n raise ProviderException(\"Failed to create molns image: {0}\".format(e))\n finally:\n if file_to_remove is not None:\n os.remove(file_to_remove)", "def getimage(self):", "def Remove(self, *args):\n return _BRepAlgo.BRepAlgo_Image_Remove(self, *args)", "def strip_exif(self,img):\n data = list(img.getdata())\n image_without_exif = PIL.Image.new(img.mode, img.size)\n image_without_exif.putdata(data)\n return image_without_exif", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")" ]
[ "0.6517635", "0.6052293", "0.60255784", "0.5684926", "0.5681953", "0.5605083", "0.55945504", "0.5563343", "0.5490649", "0.54342526", "0.54245436", "0.54207134", "0.53968036", "0.537718", "0.5360237", "0.53349835", "0.5334659", "0.53315836", "0.53258747", "0.53241473", "0.53044456", "0.5301562", "0.52950084", "0.5287947", "0.5286992", "0.5262675", "0.5262404", "0.52580464", "0.52572536", "0.52572536" ]
0.6296732
1
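The property above only strips a leading transport prefix before the value is mapped to skopeo arguments. A small standalone sketch of the same logic with example inputs (the helper name and registries are made up); note that, like the original, it uses `str.replace`, which would also drop a later occurrence of the prefix string rather than only the leading one.

def strip_image_prefix(image):
    for remove in ('oci:', 'http:', 'https:'):
        if image.startswith(remove):
            image = image.replace(remove, '')
    return image

assert strip_image_prefix('oci:registry.example.com/busybox') == 'registry.example.com/busybox'
assert strip_image_prefix('http:registry.example.com/alpine') == 'registry.example.com/alpine'
assert strip_image_prefix('docker.io/library/alpine') == 'docker.io/library/alpine'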
Execute an action on the signin API.
def _action(self, action, data=None, api="signin"):
    if not data:
        data = {}
    data['action'] = action
    # data['redirect_uri'] = self._REDIRECT_URL
    data['csrf'] = self._csrf_token()
    print(data)
    r = self.session()._post(
        "https://signin.aws.amazon.com/{0}".format(api),
        data=data,
    )
    if r.status_code != 200:
        print(r.text)
        raise Exception("failed action {0}".format(action))
    out = json.loads(r.text)
    if out['state'].lower() != 'success':
        if 'Message' in out['properties']:
            raise Exception("failed action {0}: {1}".format(action, out['properties']['Message']))
        else:
            raise Exception("failed action {0}".format(action))
    return out['properties']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sign_in(self) -> pulumi.Output['outputs.ServiceSignIn']:\n return pulumi.get(self, \"sign_in\")", "def initiateAuthentication(identity_url, return_to=None):", "def signin():\n scope = request.args.get(\n 'scope',\n 'identify')\n discord = make_session(scope=scope.split(' '))\n authorization_url, state = discord.authorization_url(AUTHORIZATION_BASE_URL)\n session['oauth2_state'] = state\n return redirect(authorization_url)", "def petition(handler):\n # client_object = Clients()\n sound_cloud_client = Clients().sound_cloud_client()\n handler.redirect(sound_cloud_client.authorize_url())", "def do_login(self, backend, user):", "async def sign_in(self, username: str, password: str) -> None:\n try:\n await self._api.call('system', 'sign_in', un=username, pw=password)\n except CommandFailedError as ex:\n raise SignInFailedError('HEOS sign-in failed', ex.result) from ex", "def run(args, api, settings):\n\n execute_with_authenticated_user(api, (lambda: args.func(args)), settings)", "def signIn():\n\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n \n d = collections.defaultdict(dict)\n response = {\n \"status\" : \"ok\",\n \"notices\" : d,\n \"isLoggedIn\" : False\n }\n\n #TODO: Handle Error Messages status: \"error\" error: {code/msg}\n notices = confirm_signIn_info(email, password, app)\n\n identity = jwt.authentication_callback(email, password)\n\n if identity:\n access_token = jwt.jwt_encode_callback(identity)\n response[\"access_token\"] = access_token\n else:\n notices['JWT Error'] = 'Invalid Credentials'\n\n if len(notices) > 0: \n response[\"notices\"] = notices \n else: \n add_to_session(email)\n response[\"isLoggedIn\"] = True\n\n return jsonify(response)", "def do_process(self, request):\n oidconsumer = self.oidconsumer\n\n # retrieve the token from the environment (in this case, the URL)\n token = request['query'].get('token', '')\n\n # Ask the library to check the response that the server sent\n # us. Status is a code indicating the response type. info is\n # either None or a string containing more information about\n # the return type.\n status, info = oidconsumer.completeAuth(token, request['query'])\n\n css_class = 'error'\n openid_url = None\n if status == consumer.FAILURE and info:\n # In the case of failure, if info is non-None, it is the\n # URL that we were verifying. We include it in the error\n # message to help the user figure out what happened.\n openid_url = info\n fmt = \"Verification of %s failed.\"\n message = fmt % (cgi.escape(openid_url),)\n elif status == consumer.SUCCESS:\n # Success means that the transaction completed without\n # error. If info is None, it means that the user cancelled\n # the verification.\n css_class = 'alert'\n if info:\n # This is a successful verification attempt. If this\n # was a real application, we would do our login,\n # comment posting, etc. here.\n openid_url = info\n if self.url_to_username:\n username = self.url_to_username(request['environ'], openid_url)\n else:\n username = openid_url\n if 'paste.auth_tkt.set_user' in request['environ']:\n request['environ']['paste.auth_tkt.set_user'](username)\n if not self.login_redirect:\n fmt = (\"If you had supplied a login redirect path, you would have \"\n \"been redirected there. 
\"\n \"You have successfully verified %s as your identity.\")\n message = fmt % (cgi.escape(openid_url),)\n else:\n # @@: This stuff doesn't make sense to me; why not a remote redirect?\n request['environ']['paste.auth.open_id'] = openid_url\n request['environ']['PATH_INFO'] = self.login_redirect\n return self.app(request['environ'], request['start'])\n #exc = httpexceptions.HTTPTemporaryRedirect(self.login_redirect)\n #return exc.wsgi_application(request['environ'], request['start'])\n else:\n # cancelled\n message = 'Verification cancelled'\n else:\n # Either we don't understand the code or there is no\n # openid_url included with the error. Give a generic\n # failure message. The library should supply debug\n # information in a log.\n message = 'Verification failed.'\n\n return self.render(request, message, css_class, openid_url)", "def log_in_button_click(self):\n waiter.find_element(self.driver, LOG_IN_BUTTON_XPATH, by=XPATH).click()", "def _login(self, *args, **kwargs):\n pass", "def do_login(self):\n self.content = self._login()\n if self.with_tags:\n self.rest_content = self._login_vapi()", "def signin():\n auth_service = AuthService()\n form = SignInForm()\n if request.method == 'GET':\n return render_template('auth/signin.html', title='Sign In', form=form)\n\n elif request.method == 'POST':\n if form.validate_on_submit():\n user_dto = UserDto(form.email.data)\n try:\n user = auth_service.load_user(user_dto)\n if user.password == form.password.data:\n flash(\"Logged in successfully\")\n login_user(user)\n return redirect(url_for('home.welcome'))\n else:\n flash(\"Incorrect password\")\n return redirect(url_for('auth.signin'))\n except InvalidUserException:\n return (\"User does not exist\")\n flash('SignIn failed')\n return render_template('auth/signin.html', title='Sign In', form=form)", "def signin(self, method=\"POST\", email=\"[email protected]\", password=\"p4ssw0rd\",\r\n next=None):\r\n url = '/account/signin'\r\n if next is not None:\r\n url = url + '?next=' + next\r\n if method == \"POST\":\r\n return self.app.post(url, data={'email': email,\r\n 'password': password},\r\n follow_redirects=True)\r\n else:\r\n return self.app.get(url, follow_redirects=True)", "def sign_in(self, email, password):\r\n signin_url = \"https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword?key=\" + self.wak\r\n signin_payload = {\"email\": email, \"password\": password, \"returnSecureToken\": True}\r\n signin_request = requests.post(signin_url, data=signin_payload)\r\n sign_up_data = json.loads(signin_request.content.decode())\r\n app = App.get_running_app()\r\n\r\n if signin_request.ok == True:\r\n refresh_token = sign_up_data['refreshToken']\r\n localId = sign_up_data['localId']\r\n print(localId)\r\n idToken = sign_up_data['idToken']\r\n\r\n # Save localId to a variable in main app class\r\n # Save idToken to a variable in main app class\r\n app.local_id = localId\r\n app.id_token = idToken\r\n\r\n app.signed_in()\r\n App.get_running_app().root.current = 'list_screen'\r\n print('true!')\r\n elif signin_request.ok == False:\r\n error_data = json.loads(signin_request.content.decode())\r\n error_message = error_data[\"error\"]['message']\r\n app.root.ids['login_screen'].ids['wrong_login'].text = error_message.replace(\"_\", \" \")", "async def async_step_auth(self, user_input=None):\n if user_input.get(const.CODE):\n self.data = user_input\n return self.async_external_step_done(next_step_id=\"finish\")\n\n profile = user_input.get(const.PROFILE)\n\n auth_client = 
self.get_auth_client(profile)\n\n url = auth_client.get_authorize_url()\n\n return self.async_external_step(step_id=\"auth\", url=url)", "def Login(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def post(self):\n self.parser.add_argument(\n 'email', required=True, type=self.validator.validate_string_fields, help='Enter a valid email')\n self.parser.add_argument(\n 'password', required=True, type=self.validator.validate_string_fields, help='Password cannot be empty')\n\n user = self.parser.parse_args()\n response = self.user_models.sign_in(user['email'],\n user['password'])\n return {\"message\": response}", "def login(self):\n attrs, error = self.parse_attributes()\n if error:\n if self.login_error_callback:\n return self.login_error_callback(attrs)\n else:\n raise SSOAttributeError\n\n sso_logged_in.send(current_app._get_current_object(), attributes=attrs)\n\n if self.login_callback:\n return self.login_callback(attrs)", "def auth_callback():\n if not session.get('authed', False):\n flash(\"Please log in.\")\n return redirect(my_url('index'))\n if request.args.has_key('account'):\n redis_client.sadd('%s-accounts' % session['phone'], request.args['account'])\n flash(\"Account added!\")\n return redirect(my_url('accounts'))", "def _dispatch(self, api):\n self._authorize(api)\n self._handle(api)", "def authorize():\n token = oauth.tapkey.authorize_access_token()\n session['auth'] = token\n return redirect(url_for('owner_account_chooser'))", "def start_auth(request):\n # create the client to Indivo\n client = get_indivo_client(request, with_session_token=False)\n \n # do we have a record_id?\n record_id = request.GET.get('record_id', None)\n carenet_id = request.GET.get('carenet_id', None)\n \n # prepare request token parameters\n params = {'oauth_callback':'oob'}\n if record_id:\n params['indivo_record_id'] = record_id\n if carenet_id:\n params['indivo_carenet_id'] = carenet_id\n\n # request a request token\n req_token = client.fetch_request_token(params)\n\n # store the request token in the session for when we return from auth\n request.session['request_token'] = req_token\n \n # redirect to the UI server\n return HttpResponseRedirect(client.auth_redirect_url)", "def sign_in(self) -> Optional[pulumi.Input['ServiceSignInArgs']]:\n return pulumi.get(self, \"sign_in\")", "def sign_in(self) -> Optional[pulumi.Input['ServiceSignInArgs']]:\n return pulumi.get(self, \"sign_in\")", "def login(self, request):\n request.session['state'] = state = uuid.uuid4().hex\n auth_url = flat_url(\n PROVIDER_AUTH_URL,\n client_id=self.consumer_key,\n response_type='code',\n state=state\n )\n return HTTPFound(location=auth_url)", "def signin_success(request, identity_url, openid_response):\n\n logging.debug('')\n openid_data = util.from_openid_response(openid_response) #create janrain OpenID object\n request.session['openid'] = openid_data\n\n provider_name = util.get_openid_provider_name(openid_data.openid)\n user = authenticate(\n identifier = openid_data.openid,\n provider_name = provider_name,\n method = 'openid'\n )\n\n next_url = get_next_url(request)\n\n request.session['email'] = openid_data.sreg.get('email', '')\n request.session['username'] = openid_data.sreg.get('nickname', '')\n\n return finalize_generic_signin(\n request = request,\n user = user,\n user_identifier = openid_data.openid,\n login_provider_name = provider_name,\n redirect_url = next_url\n )", "def login(self):\n r = self._login_token()", "def oauth_start_flow():\n # Have to do authentication!\n 
rest.default_user_authentication()\n\n account_type = flask.request.args.get('type')\n if account_type is None:\n flask.abort(400)\n\n cls = ACCOUNT_TYPES.get(account_type, None)\n if cls is None:\n flask.about(400)\n\n key = str(uuid.uuid4())\n instance = cls(id=key)\n instance.put()\n\n return flask.redirect(instance.AUTH_URL %\n {'client_id': instance.CLIENT_ID,\n 'state': key})", "def action():\n\n try:\n\n # Get the token for this authentication (rendered in a hidden input\n # field, see authentication/index.html template).\n token = request.form['token']\n\n # Get an instance of the Authentication class.\n auth = get_restpki_client().get_authentication()\n\n # Call the complete_with_webpki() method with the token, which finalizes\n # the authentication process. The call yields a ValidationResults\n # object, which denotes whether the authentication was successful or not\n # (we'll use it to render the page accordingly, see below).\n result = auth.complete_with_webpki(token)\n\n vr_html = str(result.validation_results)\n vr_html = vr_html.replace('\\n', '<br/>')\n vr_html = vr_html.replace('\\t', '&nbsp;&nbsp;&nbsp;&nbsp;')\n\n user_cert = None\n if result.validation_results.is_valid:\n # At this point, you have assurance that the certificate is valid\n # according to the SecurityContext specified on the method\n # start_with_webpki() and that the user is indeed the certificate's\n # subject. Now, you'd typically query your database for a user that\n # matches one of the certificate's fields, such as\n # user_cert.emailAddress or user_cert.pki_brazil.cpf (the actual\n # field to be used as key depends on your application's business\n # logic) and set the user as authenticated with whatever web\n # security framework your application uses. For demonstration\n # purposes, we'll just render the user's certificate information.\n user_cert = result.certificate\n\n return render_template('authentication/action.html',\n valid=result.validation_results.is_valid,\n vr_html=vr_html,\n user_cert=user_cert)\n\n except Exception as e:\n return render_template('error.html', msg=e)" ]
[ "0.620925", "0.60264295", "0.60104525", "0.5591886", "0.5576621", "0.55588895", "0.55383855", "0.5535826", "0.55275285", "0.55239755", "0.5506689", "0.55012316", "0.549917", "0.54822266", "0.5477129", "0.5430199", "0.54202974", "0.5412541", "0.5411632", "0.5404519", "0.5398512", "0.53894264", "0.5381753", "0.53766614", "0.53766614", "0.5375453", "0.5373824", "0.53677434", "0.53513825", "0.53438973" ]
0.6361719
0
checksum for nmea string if there is no checksum present for simplified arduino input The check sum should be appended to the original NMEA sentence and return
def checkSum(nmea_string):
    # take string after $
    nmea_str = re.sub(r'^\$(.*)$', r'\1', nmea_string)
    # clear whitespace
    nmea_str = re.sub(r'\s', '', nmea_str)
    checksum = 0  # initialize
    for b in nmea_str:
        checksum ^= ord(b)  # xor
    # need to remove the front '0x' from the hex number
    return(nmea_string + "*" + re.sub(r'^0x', '', hex(checksum)).zfill(2))
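A quick usage sketch of `checkSum` with a made-up sentence, assuming the function above and `import re` are in scope; the expected value 0x21 is simply the XOR of every byte between the '$' and the end of the sentence. Note that `hex()` yields lowercase digits, so strict NMEA consumers that expect uppercase may want an extra `.upper()`.

import re  # required by checkSum above

sentence = "$GPTXT,HELLO"        # made-up example sentence
print(checkSum(sentence))        # -> $GPTXT,HELLO*21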
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checksum(s):\n result = re.search('\\$(.*)\\*', s) # everything between '$' and '*' (escaped with '\\')\n\n # https://rietman.wordpress.com/2008/09/25/how-to-calculate-the-nmea-checksum/\n # see also https://forum.u-blox.com/index.php/14618/python-generate-checksums-validate-coming-serial-interface\n\n checksum = 0\n for thing in result.group(1):\n checksum = checksum ^ ord(thing) # Xor\n\n ck = hex(0x100 + checksum)[-2:].upper()\n return ck", "def calculate_checksum(self, data):\n\t\tdata = data[2:] # Ignore start tokens ($$)\n\t\tcrc16 = crcmod.predefined.mkCrcFun('crc-ccitt-false')\n\t\treturn hex(crc16(data))[2:].upper().zfill(4)", "def do_checksum(source_string):\n sum = 0\n max_count = 3\n count = 0\n while count < max_count:\n val = ord(source_string[count + 1]) * 256 + ord(source_string[count])\n sum = sum + val\n sum = sum & 0xffffffff\n count = count + 2\n if max_count < len(source_string):\n sum = sum + ord(source_string[len(source_string) - 1])\n sum = sum & 0xffffffff\n\n sum = (sum >> 16) + (sum & 0xffff)\n sum = sum + (sum >> 16)\n answer = ~sum\n answer = answer & 0xffff\n answer = answer >> 8 | (answer << 8 & 0xff00)\n print(answer)\n return answer", "def checksum (upc):\n\n # check type of input\n # raise TypeError if not string\n\n # xxxxxxxxxxx x\n # check length of string\n # raise ValueError if not 12\n\n # convert string to array\n # generate checksum using the first 11 digits provided\n # check against the the twelfth digit\n # result of first 11 digits must be consistent with the value of the 12th digit\n # value must be number\n\n # return True if they are equal, False otherwise\n num = []\n #\"123456\" --> \"1\" \"2\" \"3\" \"4\" \"5\" \"6\" --> num = [1,2,3,4,5,6] --> num[0] = 1, num[3] = 4\n if type(upc) is str:\n for i in range(0, len(upc)):\n try:\n num.append(int(upc[i]))\n except ValueError:\n raise ValueError(\"Not correct length\")\n # if upc[i] is not number checksum('1b2')\n else:\n raise TypeError(\"Invalid type passed as parameter\")\n #raiseError\n\n if len(num) != 12:\n raise ValueError(\"Not correct length\")\n\n\n odd, even = num[::2], num[1::2]\n result = 0\n for i in range(0,len(odd)):\n result = result + odd[i]\n\n result *= 3\n\n # This is to add even numbered digits\n for i in range(0, (len(even)-1)):\n result = result + even[i]\n\n result %= 10\n if result != 0:\n result = 10 - result\n\n if result == num[11]:\n return True\n\n return False", "def checksum(data: str):\n if len(data) % 2 == 1:\n return data\n it = iter(data)\n new_data = ''\n for bit in it:\n if bit == next(it): # two consecutive characters are the same\n new_data += '1'\n else:\n new_data += '0'\n return checksum(new_data)", "def calculate_checksum(code):\n\n sum_odd = reduce(sum_chars, code[::2])\n sum_even = reduce(sum_chars, code[1:-1:2])\n check = (sum_even + sum_odd * 3) % 10\n\n if check == 0:\n return 0\n else:\n return 10 - check", "def checksum(self,msg):\n cksum = sum([ord(x) for x in msg])\n cksum0 = ((cksum & 0xF0) >> 4) + 0x30\n cksum1 = (cksum & 0x0F) + 0x30\n return chr(cksum0)+chr(cksum1)", "def _get_checksum(self, text):\n # Compute the new checksum over everything but the sha1sum line.\n # This will fail if sha1sum appears for some other reason. 
It won't ;-)\n text = \"\".join([line for line in text.splitlines(True) if \"sha1sum\" not in line])\n return utils.str_checksum(text)", "def checksum(n):\n\n # Compute the sum of the non-check digits.\n s = sum(luhn_digits(n * 10))\n\n # Multiply by 9.\n result = s * 9\n\n # The units digit is the check digit\n check_digit = result % 10\n\n m = int(str(n) + str(check_digit))\n assert(verify(m))\n\n return check_digit", "def correct_checksum():\n test_strs = [\"ch3ck1nG c0rr3ct ch3cksu|\\/|\\n\", \"y3T an0th3r str1ng0_x\\/.!&\\n\"]\n\n def test_checksum(test_str):\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n teardown()\n\n # Start reference solution to get answers.\n ref_server = start_server(port=REF_PORT, reference=True)\n ref_client = start_client(server_port=REF_PORT, reference=True)\n\n # Get reference checksum.\n write_to(ref_client, test_str)\n ref_segment = read_segments_from(ref_client)[0]\n ref_checksum = ref_segment.checksum\n\n # Check the first sent segment.\n segment = segments[0]\n\n # Checksum equal to the reference checksum.\n if segment.checksum == ref_checksum:\n return True\n\n # Maybe they also set an ACK for this segment. Compare with the computed\n # checksum.\n return int(segment.checksum, 16) == segment.c_repr.cksum;\n\n return reduce(lambda a, b: a and b, [test_checksum(t) for t in test_strs])", "def checksum(upc):\n\n # check type of input\n if type(upc) != str:\n # raise TypeError if not string\n raise TypeError(\"Input must be a string\")\n # check length of string\n elif len(upc) != 12:\n # raise ValueError if not 12\n raise ValueError(\"Invalid UPC length\")\n # generate checksum using the first 11 digits provided\n else:\n # add the odd digits together\n odd_digits = upc[::2]\n odd_sum = sum([int(x) for x in odd_digits])\n\n # add the even digits together (12th digit not included)\n even_digits = upc[1:-1:2]\n even_sum = sum([int(x) for x in even_digits])\n\n # multiply the odd sum by 3, add that to the even sum and\n # find the modulo 10 of the result\n mod = ((odd_sum * 3) + even_sum) % 10\n\n # if the result is not 0, subtract the result from 10\n checksum_digit = 0\n if mod != 0:\n checksum_digit = 10 - mod\n\n # check against the twelfth digit\n # return True if they are equal, False otherwise\n return int(upc[11]) == checksum_digit", "def compute_checksum(data):\n\tif len(data) & 1:\n\t\tdata = data + '\\0'\n\n\tsum = 0\n\twords = array.array('h', data)\n\tfor word in words:\n\t\tsum = sum + (word & 0xffff)\n\t\t\n\thi = sum >> 16\n\tlow = sum & 0xffff\n\tsum = hi + low\n\tsum = sum + (sum >> 16)\n\treturn (~sum) & 0xffff", "def getChecksum(self, s):\n \n chksum = 0\n for ch in s:\n chksum = chksum + ord(ch)\n \n return hex(chksum%256)[2:]", "def _get_checksum(self, arg):", "def getChecksum(dataString):\n sum = 0\n count_to = (len(dataString) / 2) * 2\n count = 0\n while count < count_to:\n this_val = ord(dataString[count + 1])*256+ord(dataString[count])\n sum = sum + this_val\n sum = sum & 0xffffffff # Necessary?\n count = count + 2\n if count_to < len(dataString):\n sum = sum + ord(dataString[len(dataString) - 1])\n sum = sum & 0xffffffff # Necessary?\n sum = (sum >> 16) + (sum & 0xffff)\n sum = sum + (sum >> 16)\n answer = ~sum\n answer = answer & 0xffff\n # Swap bytes. 
Bugger me if I know why.\n answer = answer >> 8 | (answer << 8 & 0xff00)\n return answer", "def checksum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"checksum\")", "def checksum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"checksum\")", "def _checksum(source_string):\n if (len(source_string) % 2):\n source_string += \"\\x00\"\n converted = array.array(\"H\", source_string)\n if sys.byteorder == \"big\":\n converted.bytewap()\n val = sum(converted)\n\n val &= 0xffffffff # Truncate val to 32 bits (a variance from ping.c, which\n # uses signed ints, but overflow is unlikely in ping)\n\n val = (val >> 16) + (val & 0xffff) # Add high 16 bits to low 16 bits\n val += (val >> 16) # Add carry from above (if any)\n answer = ~val & 0xffff # Invert and truncate to 16 bits\n answer = socket.htons(answer)\n\n return answer", "def checksum(message):\n check = 0\n for c in message:\n check += ord(c)\n return check % 256", "def doChecksum(line):\n return sum(map(int, filter(lambda c: c >= '0' and c <= '9', line[:-1].replace('-','1')))) % 10", "def __checksum_make(self, data):\n self.logger.info(\"{}: building the checksum for bytes {}.\".format(self.sensor_name, \":\".join(\"%02x\" % b for b in data)))\n\n if len(data) not in (self.__CommandLength - 2, self.__ResponseLength - 2):\n raise ValueError(\"{}: length data has to be {} or {}.\".format(self.sensor_name, self.__CommandLength - 2, self.__ResponseLength))\n\n if data[0] != self.__SerialStart:\n raise ValueError(\"{}: data is missing the start byte.\".format(self.sensor_name))\n\n if data[1] not in (self.__SendByte, self.__ResponseByte, self.__ReceiveByte):\n raise ValueError(\"{}: data is missing SendByte, ReceiveByte or ReceiveValue-Byte\".format(self.sensor_name))\n\n if data[1] != self.__ReceiveByte and data[2] not in command.values():\n raise ValueError(\"{}: the data command byte value \\\"{}\\\" is not valid.\".format(self.sensor_name, data[2]))\n\n # Build checksum for data to send or receive\n checksum = 0\n for i in range(2, len(data)):\n checksum = checksum + data[i]\n checksum = checksum % 256\n\n self.logger.info(\"{}: checksum calculated {} for bytes {}.\".format(self.sensor_name, \"%02x\" % checksum, \":\".join(\"%02x\" % b for b in data)))\n return checksum", "def __calculate_checksum(cls, number) -> str:\n # TODO in future stages, this function will use the Luhn algorithm to create checksum\n return str(sum(int(num) for num in str(number)) % 10)", "def test_right_checksum(self):\n self.assertEqual(utils.checksum('fooo'), 'L')", "def ean_checksum(eancode):\n if len(eancode) <> 13:\n return -1\n oddsum=0\n evensum=0\n total=0\n eanvalue=eancode\n reversevalue = eanvalue[::-1]\n finalean=reversevalue[1:]\n\n for i in range(len(finalean)):\n if i % 2 == 0:\n oddsum += int(finalean[i])\n else:\n evensum += int(finalean[i])\n total=(oddsum * 3) + evensum\n\n check = int(10 - math.ceil(total % 10.0)) %10\n return check", "def checksum(data, sum=0):\n # make 16 bit words out of every two adjacent 8 bit words in the packet\n # and add them up\n data = str(data)\n \n\n for i in range(0, len(data), 2):\n if i + 1 >= len(data):\n sum += ord(data[i]) & 0xFF\n else:\n w = ((ord(data[i]) << 8) & 0xFF00) + (ord(data[i + 1]) & 0xFF)\n sum += w\n\n # take only 16 bits out of the 32 bit sum and add up the carries\n while (sum >> 16) > 0:\n sum = (sum & 0xFFFF) + (sum >> 16)\n\n # one's complement the result\n sum = ~sum\n\n return sum & 0xFFFF", "def _get_cksum(packet):\n if len(packet) & 1:\n packet = packet + 
'\\0'\n datas = array.array('h', packet)\n sum = 0\n for data in datas:\n sum += (data & 0xffff)\n hi = sum >> 16\n lo = sum & 0xffff\n sum = hi + lo\n sum = sum + (sum >> 16)\n return (~sum) & 0xffff", "def addChecksum(s):\n if len(s) < 1:\n raise ValueError, \"The provided string needs to be atleast 1 byte long\"\n return (_calcChecksum(s) + s)", "def calculate_checksum(self, message):\n s = 0\n for i in range(0, len(message)-1, 2):\n w = (message[i]) + (message[i + 1] << 8) << 8\n s = ((w + s) & 0xffff) + ((w + s) >> 16)\n return s", "def calculate_checksum(source_string):\n countTo = (int(len(source_string) / 2)) * 2\n sum = 0\n count = 0\n\n # Handle bytes in pairs (decoding as short ints)\n loByte = 0\n hiByte = 0\n while count < countTo:\n if (byteorder == \"little\"):\n loByte = source_string[count]\n hiByte = source_string[count + 1]\n else:\n loByte = source_string[count + 1]\n hiByte = source_string[count]\n sum = sum + (ord(hiByte) * 256 + ord(loByte))\n count += 2\n\n # Handle last byte if applicable (odd-number of bytes)\n # Endianness should be irrelevant in this case\n if countTo < len(source_string): # Check for odd length\n loByte = source_string[len(source_string) - 1]\n sum += ord(loByte)\n\n sum &= 0xffffffff # Truncate sum to 32 bits (a variance from ping.c, which\n # uses signed ints, but overflow is unlikely in ping)\n\n sum = (sum >> 16) + (sum & 0xffff) # Add high 16 bits to low 16 bits\n sum += (sum >> 16) # Add carry from above (if any)\n answer = ~sum & 0xffff # Invert and truncate to 16 bits\n answer = socket.htons(answer)\n\n return answer", "def checksum(source_string):\n the_sum = 0\n count_to = (len(source_string)/2)*2\n count = 0\n while count < count_to:\n this_val = ord(source_string[count + 1])*256 + ord(source_string[count])\n the_sum = the_sum + this_val\n the_sum = the_sum & 0xffffffff # Necessary?\n count = count + 2\n\n if count_to<len(source_string):\n the_sum = the_sum + ord(source_string[len(source_string) - 1])\n the_sum = the_sum & 0xffffffff # Necessary?\n\n the_sum = (the_sum >> 16) + (the_sum & 0xffff)\n the_sum = the_sum + (the_sum >> 16)\n answer = ~the_sum\n answer = answer & 0xffff\n\n # Swap bytes. Bugger me if I know why.\n answer = answer >> 8 | (answer << 8 & 0xff00)\n\n return answer" ]
[ "0.6902954", "0.67252344", "0.65304875", "0.6492474", "0.6474358", "0.6465052", "0.6324542", "0.61919427", "0.6151632", "0.61403924", "0.6118252", "0.6098456", "0.5994872", "0.595297", "0.59509265", "0.5938676", "0.5938676", "0.59314114", "0.59124553", "0.5904448", "0.5897225", "0.5889876", "0.5887578", "0.5880952", "0.5866323", "0.5858726", "0.58501256", "0.5837509", "0.579501", "0.5749441" ]
0.77633667
0
sign(x) = 1 when x >= 0, sign(x) = -1 when x < 0
def sign(x):
    if x >= 0:
        return 1
    else:
        return -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sign(x):\n if x >= 0:\n return 1\n return -1", "def sign(a):\n return (a > 0) - (a < 0)", "def sign(a) :\n return (a>0) - (a<0)", "def sign(num: float):\n return 1 if num >= 0 else -1", "def sign(x):\n if x < 0.0:\n sign = -1\n elif x == 0.0:\n sign = 0\n elif x > 0.0:\n sign = 1\n return sign", "def sign(v):\n return np.where(v < 0, -1.0, 1.0)", "def sign(x):\n return(copysign(1, x))", "def sign(n):\n return (n > 0) - (n < 0)", "def signum(x: float) -> float:\n if x < 0:\n return -1.0\n elif x > 0:\n return 1.0\n return 0.0", "def sign(n: float) -> int:\n return 1 if n > 0 else -1", "def signum ( x ) :\n ### for integers\n from ostap.core.ostap_types import is_integer as _is_integer \n if _is_integer ( x ) : return 0 if 0 == x else +1 if 0<x else -1\n ## for floating numbers\n return 0 if iszero ( x ) else +1 if 0 < x else -1", "def sign(d):\n if d > 0:\n return 1\n if d == 0:\n return 0\n if d < 0:\n return -1\n return None", "def _sign(a):\n if a == 0.0:\n return 1\n else:\n return a/abs(a)", "def samesign ( a , b ) :\n return ( 0 < a and 0 < b ) or ( 0 > a and 0 > b )", "def sign(self):\n return 1 - 2 * self._ltz()", "def _sign(self, number):\n return cmp(number,0)", "def invert0(x):\n return 0 if x > 0 else 1", "def signal(x):\r\n if x >= 0.0:\r\n return 1.0\r\n return -1.0", "def sign_st(x):\n from tframe import hub as th\n def sign(v):\n return (tf.cast(tf.math.greater_equal(v, 0), th.dtype) - 0.5) * 2\n def grad(dy):\n return dy * tf.cast(tf.logical_and(\n tf.greater_equal(x, -1.0), tf.less_equal(x, 1.0)), dtype=th.dtype)\n return sign(x), grad", "def samesign(a, b):\n return a * b > 0", "def isnegative(x):\n if x < 0 :\n return True\n return False", "def positive(num):\n return num if num > 0.0 else 0.0", "def positive(num):\n return num if num > 0.0 else 0.0", "def nonzero_sign(\n x: type_alias.TensorLike,\n name: str = 'nonzero_sign') -> tf.Tensor:\n with tf.name_scope(name):\n x = tf.convert_to_tensor(value=x)\n\n one = tf.ones_like(x)\n return tf.where(tf.greater_equal(x, 0.0), one, -one)", "def absolute(x):\n return -x if x < 0 else x", "def positive(x):\n return np.maximum(x, 0.0)", "def abs(self):\n return self * self.sign()", "def sgn(x) -> int:\n if x > 0:\n return 1\n if x < 0:\n return -1\n return 0", "def _sign(self):\n \n # pylint: disable=maybe-no-member\n if self.is_zero:\n return 0\n elif not self.field:\n # representing a rational\n if self.a > 0:\n return 1\n elif self.a < 0:\n return -1\n else:\n return 0\n else:\n if self.a.is_zero:\n return self.b._sign()\n if self.b.is_zero:\n return self.a._sign()\n sa = self.a._sign()\n sb = self.b._sign()\n if sa == sb:\n return sa\n else:\n return sa * (self.a * self.a - self.r * self.b * self.b)._sign()", "def fun(self, x):\n if np.any(x < 0):\n return np.inf\n else:\n return 0" ]
[ "0.8934227", "0.8589877", "0.8557983", "0.84978384", "0.8455169", "0.82966465", "0.8226261", "0.8218827", "0.80557597", "0.7960639", "0.77862436", "0.7741318", "0.76713824", "0.74776924", "0.74519736", "0.74350107", "0.73274165", "0.73155457", "0.72797954", "0.7140799", "0.7089638", "0.69871986", "0.69871986", "0.6960254", "0.6930732", "0.6907175", "0.69062847", "0.6866786", "0.6866476", "0.68621165" ]
0.89466614
0
vector dot product, return a scalar; x and y are two vectors with the same length
def vector_dot(x, y):
    if(len(x) != len(y)):
        raise ValueError("vector lengths differ")
    else:
        # return x1*y1+x2*y2+...xn*yn
        return sum([x[i] * y[i] for i in range(len(x))])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vec_dot(x, y):\r\n return sum(a * b for a, b in zip(x, y))", "def naive_vector_dot(x, y):\n assert len(x.shape) == 1\n assert len(y.shape) == 1\n assert x.shape[0] == y.shape[0]\n\n z = 0\n for i in range(x.shape[0]):\n z += x[i] * y[i]\n return z", "def vector_dot(v1,v2):\n return (v1.x * v2.x) + (v1.y * v2.y) + (v1.z * v2.z)", "def vec_dot(v1,v2):\r\n \r\n return np.dot(v1,v2)", "def dot(vector1, vector2):\n return sum(a1 * a2 for a1, a2 in zip(vector1, vector2))", "def dot(a, b):\n\n if len(a) != len(b):\n raise Exception(\"Input vectors must be of same length, not %d and %d\" % (len(a), len(b)))\n\n return float(sum([a[i] * b[i] for i in range(len(a))]))", "def dotproduct(vec1, vec2):\n return sum((a*b) for a, b in zip(vec1, vec2))", "def dot(x,y):\n\treturn sum([xi*yi for (xi,yi) in zip(x,y)])", "def dot_product(vec_1:tuple, vec_2:tuple)->float:\n return vec_1[0] * vec_2[0] + vec_1[1] * vec_2[1]", "def dotproduct(vec1, vec2):\n import operator\n return sum(map(operator.mul, vec1, vec2))", "def dotproduct(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return (first.x*other.x + first.y*other.y + first.z*other.z)", "def dot(a, b):\n return np.vdot(a.arr,b.arr)", "def dot_vectors(u, v):\n return u[0] * v[0] + u[1] * v[1] + u[2] * v[2]", "def dotProduct(v1, v2):\n n1 = normalize(v1)\n n2 = normalize(v2)\n return n1[0] * n2[0] + n1[1] * n2[1] + n1[2] * n2[2]", "def vdot(a, b):\n return np.vdot(a.ravel(), b.ravel())", "def dot(x, y):\n res = x[0] * y[0]\n for a, b in zip(x, y):\n res += a * b\n return res", "def dot(vector_1: List, vector_2: List) -> float:\n if len(vector_1) != len(vector_2):\n raise InvalidInput(error_code_messages[\"InvalidLength\"])\n\n return sum(x * y for x, y in zip(vector_1, vector_2))", "def dotProduct(v1, v2):\n return sum((a * b) for a, b in zip(v1, v2))", "def dot(vector01,vector02):\r\n result = 0\r\n # creates the initial value for the result of the dot product\r\n for z in range(len(vector01)):\r\n # for loop which continues as long as there are more values left in the vector \r\n result += vector01[z]*vector02[z]\r\n # the new result is found to be the corresponding values in each vector multiplied and then added together \r\n return result", "def naive_matrix_vector_dot(x, y):\n assert len(x.shape) == 2\n assert len(y.shape) == 1\n assert x.shape[1] == y.shape[0]\n\n z = np.zeros(x.shape[0])\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n z[i] += x[i, j] * y[j]\n return z", "def vecDot(a, b):\n ret=0.0\n for i in range(len(a)):\n ret+=a[i]*b[i]\n return ret", "def dot(a, b):\n return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]", "def dotProduct(vectorA, vectorB):\r\n product =0\r\n for i in range(len(vectorA)):\r\n product += eval(vectorA[i])*eval(vectorB[i])\r\n return product", "def dot_product(vector1, vector2):\n out = None\n ### YOUR CODE HERE\n out=np.dot(vector1,vector2)\n ### END YOUR CODE\n\n return out", "def dot_product(v1, v2):\n return v1[0] * v2[0] + v1[1] * v2[1]", "def dot_product(v1, v2):\n return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]", "def dot(a,b):\n return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]", "def dot_product(vector1, vector2):\n return [reduce_by_multiplication(pair) for pair in zip(vector1, vector2)]", "def dotproduct(x, y):\n return sum(imap(operator.mul, x, y))", "def dotproduct(v1, v2):\n\treturn sum(imap(operator.mul, v1, v2))" ]
[ "0.86098886", "0.83120406", "0.8255133", "0.8179656", "0.80700606", "0.7998463", "0.79642713", "0.79305995", "0.7897161", "0.78932184", "0.78799474", "0.787511", "0.78657234", "0.7834769", "0.7806407", "0.7790224", "0.7761924", "0.7682441", "0.767671", "0.7645681", "0.7637908", "0.76359797", "0.76334286", "0.7630208", "0.761476", "0.7587227", "0.7555758", "0.7526806", "0.75261647", "0.7516081" ]
0.8495939
1
vector cross product, return a vector; x and y are two vectors with the same length; the returned vector z is the same length as well, this time only with dim=3
def vector_cross(x, y):
    if(len(x) != len(y)):
        raise ValueError("vector lengths differ")
    elif(len(x) > 3):
        raise ValueError("vector is more than 3D")
    else:
        s = [x[1] * y[2] - x[2] * y[1],
             x[2] * y[0] - x[0] * y[2],
             x[0] * y[1] - x[1] * y[0]]
        return s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cross(vec1, vec2):\n result = np.zeros(3)\n return cross_(vec1, vec2, result)", "def cross(x, y):\n x = x.reshape(3)\n y = y.reshape(3)\n z = np.cross(x, y)\n z = z.reshape((3, 1))\n return z", "def vec_cross(a,b):\r\n return [a[1] * b[2] - a[2] * b[1],\r\n a[2] * b[0] - a[0] * b[2],\r\n a[0] * b[1] - a[1] * b[0]]", "def cross(v1: Vector, v2: Vector) -> Vector: # Function is fucked TODO\n if len(v1.coords) != 3 or len(v2.coords) != 3:\n raise ValueError(\"Vectors have to be 3 fucking D, nøøb\")\n x = v1.y * v2.z - v1.z * v2.y\n y = v1.z * v2.x - v1.x * v2.z\n z = v1.x * v2.y - v1.y * v2.x\n return Vector(x, y, z)", "def cross(a, b):\n #return np.cross(a,b)\n\n return vector(a[1] * b[2] - a[2] * b[1],\n a[2] * b[0] - a[0] * b[2],\n a[0] * b[1] - a[1] * b[0])", "def cross(a, b):\n c1 = a[1]*b[2] - a[2]*b[1]\n c2 = a[2]*b[0] - a[0]*b[2]\n c3 = a[0]*b[1] - a[1]*b[0]\n return sp.array([c1,c2,c3])", "def cross_product(v1, v2):\n return cg3d_vector.CG3dVector(\n v1[1] * v2[2] - v2[1] * v1[2],\n v1[2] * v2[0] - v2[2] * v1[0],\n v1[0] * v2[1] - v2[0] * v1[1]\n )", "def crossProduct(p1, p2, p3):\n return (\n -(p1[1]*p2[0]) + p1[0]*p2[1] +\n p1[1]*p3[0] - p2[1]*p3[0] -\n p1[0]*p3[1] + p2[0]*p3[1]\n )", "def cross_(vec1, vec2, result):\n a1, a2, a3 = double(vec1[0]), double(vec1[1]), double(vec1[2])\n b1, b2, b3 = double(vec2[0]), double(vec2[1]), double(vec2[2])\n result[0] = a2 * b3 - a3 * b2\n result[1] = a3 * b1 - a1 * b3\n result[2] = a1 * b2 - a2 * b1\n return result", "def cross(v1, v2):\n return np.cross(v1, v2)", "def cross3(self, left, right):\n return np.array([left[1] * right[2] - left[2] * right[1],\n left[2] * right[0] - left[0] * right[2],\n left[0] * right[1] - left[1] * right[0]])", "def cross(self, v):\n if (len(self.mV) != 3) or (len(v) != 3):\n raise IndexError('Cross product is only for 2 3-vectors.')\n\n (x1, y1, z1) = (self.mV[0], self.mV[1], self.mV[2])\n (x2, y2, z2) = (v[0], v[1], v[2])\n x = y1 * z2 - y2 * z1\n y = z1 * x2 - z2 * x1\n z = x1 * y2 - x2 * y1\n return Vector(x, y, z)", "def cross(u,v):\n u1, u2, u3 = u\n v1, v2, v3 = v\n return np.array([u2*v3 - u3*v2,\n u3*v1 - u1*v3,\n u1*v2 - u2*v1], dtype=u.dtype)", "def test_cross_v3(self):\n\n vec1 = Vec3(1, 0, 0)\n vec2 = Vec3(0, 1, 0)\n cross = vec1.cross(vec2)\n\n expected = Vec3(0, 0, 1)\n\n self.assertEqual(cross, expected)", "def test_cross():\n assert_equal(cross(Vector(1, 0, 0), Vector(0, 1, 0)), Vector(0, 0, 1))\n assert_equal(cross(Vector(1, 3, 2), Vector(-1, 1, 0)), Vector(-2, -2, 4))", "def d_cross(a, b):\n d_cross = np.zeros((3, 3), dtype=float)\n for i in range(3):\n ei = np.zeros(3, dtype=float)\n ei[i] = 1.0\n d_cross[i] = np.cross(ei, b)\n return d_cross", "def cross(p, q):\n xyz = np.zeros(3)\n xyz[0] = p[1] * q[2] - p[2] * q[1]\n xyz[1] = p[2] * q[0] - p[0] * q[2]\n xyz[2] = p[0] * q[1] - p[1] * q[0]\n return xyz", "def cross_vectors(u, v):\n return [u[1] * v[2] - u[2] * v[1],\n u[2] * v[0] - u[0] * v[2],\n u[0] * v[1] - u[1] * v[0]]", "def cross_z(self):\n return self.v.cross(Vector((0, 0, 1)))", "def vector_cross(v, w):\n res = np.cross(v, w)\n\n if len(v) == 3:\n return Vector(*res)\n else:\n return res", "def cross_product(p0,p1,p2):\n\treturn (((p1[0]-p0[0])*(p2[1]-p0[1]))-((p2[0]-p0[0])*(p1[1]-p0[1])))", "def crossProduct( set1, set2):\n set1 = asarray( set1, _aformat(set1))\n set1 = reshape( set1, (-1, 3))\n set2 = asarray( set2, _aformat(set2))\n set2 = reshape( set2, (-1, 3))\n return cross( set1, set2 )", "def cross(a, b):\n return np.array([a[1]*b[2] - a[2]*b[1],\n a[2]*b[0] - 
a[0]*b[2],\n a[0]*b[1] - a[1]*b[0]])", "def cross_multiply(x):\n return (x[0][0] * x[1][1]) - (x[0][1] * x[1][0])", "def xCrossProd(self, other):\n return other.y * self.z - other.z * self.y", "def cross_product(a,b):\n return [a[1]*b[2]-a[2]*b[1], a[2]*b[0]-a[0]*b[2], a[0]*b[1]-a[1]*b[0]]", "def crossproduct(first, other=FreeCAD.Vector(0,0,1)):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return FreeCAD.Vector(first.y*other.z - first.z*other.y, first.z*other.x - first.x*other.z, first.x*other.y - first.y*other.x)", "def crossProduct4( set1, set2 ):\n set1 = asarray( set1, _aformat(set1))\n set1 = reshape( set1, (-1, 4))\n set2 = asarray( set2, _aformat(set1))\n set2 = reshape( set2, (-1, 4))\n result = zeros( (len(set1),4), _aformat(set1))\n result[:,:3] = cross( set1[:,:3],set2[:,:3])\n result[:,3] = 1.0\n return result", "def cross(a,b):\n \n return [ a[1]*b[2] - a[2]*b[1],\n a[2]*b[0] - a[0]*b[2],\n a[0]*b[1] - a[1]*b[0],\n 1.0 ]", "def cross(self, vec):\n if not isinstance(vec, Vector3Array):\n raise TypeError('Cross product operand must be a Vector3Array')\n if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:\n raise ValueError('Cross product operands must have the same '\n 'number of elements.')\n return Vector3Array(np.cross(self, vec))" ]
[ "0.76212597", "0.74925447", "0.73110324", "0.7152654", "0.7140053", "0.71354425", "0.7123215", "0.71210593", "0.70858693", "0.7048258", "0.70462084", "0.7012079", "0.69709086", "0.69547546", "0.6936852", "0.69079554", "0.6902274", "0.68876064", "0.6840153", "0.6813516", "0.6756766", "0.67554307", "0.67518204", "0.67170393", "0.6689858", "0.66766953", "0.6665882", "0.66084355", "0.6596696", "0.65939116" ]
0.76472753
0
data acquisition of accelerometer and magnetometer data, LSM303D datasheet
def LSM_acquisition(add):
    # control register
    CTRL0 = 0x1f  # p.34, accelerator
    CTRL1 = 0x20
    CTRL2 = 0x21
    CTRL5 = 0x24  # p.36, magnetic
    CTRL6 = 0x25
    CTRL7 = 0x26
    FIFO_CTRL = 0x2e  # p.40
    # accelerator
    OUT_X_L_A = 0x28
    OUT_X_H_A = 0x29
    OUT_Y_L_A = 0x2a
    OUT_Y_H_A = 0x2b
    OUT_Z_L_A = 0x2c
    OUT_Z_H_A = 0x2d
    # magnetic
    OUT_X_L_M = 0x08
    OUT_X_H_M = 0x09
    OUT_Y_L_M = 0x0a
    OUT_Y_H_M = 0x0b
    OUT_Z_L_M = 0x0c
    OUT_Z_H_M = 0x0d

    # follow lsm303D arduino library
    # AFS = 0, +-2g scale
    bus.write_byte_data(add, CTRL2, 0x00)
    # 50 Hz AODR, all axis enable
    bus.write_byte_data(add, CTRL1, 0x57)
    # high resolution, 6.25Hz MODR
    bus.write_byte_data(add, CTRL5, 0x64)
    # +-4 gauss scale
    bus.write_byte_data(add, CTRL6, 0x20)
    # low power mode off, continuous conversion mode
    bus.write_byte_data(add, CTRL7, 0x00)

    # # FIFO mode
    # bus.write_byte_data(add, CTRL0, 0b01000000)
    # bus.write_byte_data(add, FIFO_CTRL, 0b01000000)
    # # accelerator with 12.5Hz, all axis enable
    # bus.write_byte_data(add, CTRL1, 0b00110111)
    # # magnetic 12.5Hz, high resolution, temp en
    # bus.write_byte_data(add, CTRL5, 0b11100000)
    # # full scale range \pm 12 gauss
    # bus.write_byte_data(add, CTRL6, 0b01101000)
    # # enable magnetic
    # bus.write_byte_data(add, CTRL7, 0x00)

    # accelerator accumulate
    while True:
        uint16_ax = (bus.read_byte_data(add, OUT_X_H_A) << 8) + \
            bus.read_byte_data(add, OUT_X_L_A)
        uint16_ay = (bus.read_byte_data(add, OUT_Y_H_A) << 8) + \
            bus.read_byte_data(add, OUT_Y_L_A)
        uint16_az = (bus.read_byte_data(add, OUT_Z_H_A) << 8) + \
            bus.read_byte_data(add, OUT_Z_L_A)
        uint16_mx = (bus.read_byte_data(add, OUT_X_H_M) << 8) + \
            bus.read_byte_data(add, OUT_X_L_M)
        uint16_my = (bus.read_byte_data(add, OUT_Y_H_M) << 8) + \
            bus.read_byte_data(add, OUT_Y_L_M)
        uint16_mz = (bus.read_byte_data(add, OUT_Z_H_M) << 8) + \
            bus.read_byte_data(add, OUT_Z_L_M)

        # accelerometer 12 bit left aligned
        # ax = twos_comp(uint16_ax>>4, 12)
        # ay = twos_comp(uint16_ay>>4, 12)
        # az = twos_comp(uint16_az>>4, 12)
        ax = twos_comp(uint16_ax, 16)
        ay = twos_comp(uint16_ay, 16)
        az = twos_comp(uint16_az, 16)

        mx = twos_comp(uint16_mx, 16)
        my = twos_comp(uint16_my, 16)
        mz = twos_comp(uint16_mz, 16)

        yield [ax, ay, az, mx, my, mz]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def obtain_data(self):\n ##MODIFY THIS\n #ipdb.set_trace()\n print('obtain_data')\n print(self.enabler)\n print(self.index)\n helper = '>'+str(1+int(self.chann_span.get()))+'Q'\n print('helper='+helper)\n while(self.enabler):\n #print('size'+str(1+int(self.chann_span.get())))\n #print('offset'+str(self.index-self.index_offset))\n A2 = struct.unpack(helper, fpga.read('A2', (1+int(self.chann_span.get()))*8, ((self.index-self.index_offset)*8))) \n #print(A2)\n #print(str(10*np.log10(A2))+'dB')\n self.amp_a2[0] = np.mean(A2)\n self.amp_a2 = np.roll(self.amp_a2, -1)\n B2 = struct.unpack(helper, fpga.read('B2', (1+int(self.chann_span.get()))*8, ((self.index-self.index_offset)*8)))\n self.amp_b2[0] = np.mean(B2)\n self.amp_b2 = np.roll(self.amp_b2, -1)\n AB_re = struct.unpack(helper, fpga.read('AB_re', (1+int(self.chann_span.get()))*8, ((self.index-self.index_offset)*8)))\n self.ab_re[0] = np.mean(AB_re)\n self.ab_re = np.roll(self.ab_re, -1)\n AB_im = struct.unpack(helper, fpga.read('AB_im', (1+int(self.chann_span.get()))*8, ((self.index-self.index_offset)*8)))\n self.ab_im[0] = np.mean(AB_im)\n self.ab_im = np.roll(self.ab_im, -1) \n # print('RE:' + str(self.ab_re)+ '\\t IM:' +str(self.ab_im))\n log_a = 10*np.log10(np.mean(self.amp_a2)+1.0)\n log_b = 10*np.log10(np.mean(self.amp_b2)+1.0) \n ang = np.rad2deg(np.arctan2(np.mean(self.ab_im), np.mean(self.ab_re))) #review the way of avg this... i dont know if its the most correct way to do it...\n self.a2.set(log_a)\n self.b2.set(log_b)\n self.ang.set(ang)\n self.amp_rel.set(log_a-log_b)\n return 1", "def go(self):\n # ipdb.set_trace()\n self.ang_data = np.zeros(int(self.avg.get()))\n self.amp_a2 = np.zeros(int(self.avg.get()))\n self.amp_b2 = np.zeros(int(self.avg.get()))\n self.ab_re = np.zeros(int(self.avg.get()))\n self.ab_im = np.zeros(int(self.avg.get()))\n df = bw*1.0/channels\n self.index = trunc(float(self.freq2meas.get())/df)\n self.index_offset = trunc(int(self.chann_span.get()))\n self.enabler = 1\n #self.t = threading.Thread(target=self.obtain_data, name='data_thread')\n print(enabler)\n self.t.start()\n return 1", "def __init__(self):\n self.data0 = [] # This will hold data from ADC0\n self.data1 = [] # This will hold data from ADC1\n self.dev = _configure_device()", "def take_data(num_points, sampling_rate):\n\n\tanalog_input = Task()\n\tread = int32()\n\tdata = numpy.zeros((num_points,), dtype=numpy.float64)\n\n\t# DAQmx Configure Code\n\tanalog_input.CreateAIVoltageChan(\"Dev1/ai0\", \"\", DAQmx_Val_Cfg_Default, -10.0, 10.0, DAQmx_Val_Volts, None)\n\tanalog_input.CfgSampClkTiming(\"\", sampling_rate, DAQmx_Val_Rising, DAQmx_Val_FiniteSamps,num_points)\n\n\t# DAQmx Start Code\n\tanalog_input.StartTask()\n\n\t# DAQmx Read Code\n\tanalog_input.ReadAnalogF64(num_points, 10.0, DAQmx_Val_GroupByChannel, data, num_points, byref(read),None)\n\n\treturn data", "def readaccl(self):\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_X_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_X_H_A)\r\n\t\t\r\n\t\txAccl = data1 * 256 + data0\r\n\t\tif xAccl > 32767 :\r\n\t\t\txAccl -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Y_L_M(0x2A), 2 bytes\r\n\t\tY-Axis Mag LSB, Y-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Y_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Y_H_A)\r\n\t\t\r\n\t\tyAccl = data1 * 256 + data0\r\n\t\tif yAccl > 32767 :\r\n\t\t\tyAccl -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Z_L_M(0x2C), 2 
bytes\r\n\t\tZ-Axis Mag LSB, Z-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Z_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Z_H_A)\r\n\t\t\r\n\t\tzAccl = data1 * 256 + data0\r\n\t\tif zAccl > 32767 :\r\n\t\t\tzAccl -= 65536\r\n\t\t\r\n\t\treturn {'x' : xAccl, 'y' : yAccl, 'z' : zAccl}", "def acquisition(self):\n\t\twhile True:\n\t\t\tself.humidity, self.temperature = Adafruit_DHT.read_retry(SENSOR, PIN)\n\t\t\tprint (\"[{}] New measures from the Adafruit DHT:\\n\\tTemperature: {}C\\n\\tHumidity: {}%\".format(\n\t\t\t\tint(time.time()),\n\t\t\t\tself.temperature,\n\t\t\t\tself.humidity\n\t\t\t))\n\t\t\tmqttCli.publish(\"measure/temperature\", mqttJsonDump(self.temperature))\n\t\t\tmqttCli.publish(\"measure/humidity\", mqttJsonDump(self.humidity))\n\t\t\t\n\t\t\tself.updatePendingJson(\"humidity\", self.humidity, \"data\")\n\t\t\tself.updatePendingJson(\"temperature\", self.temperature, \"data\")\n\t\t\t\n\t\t\tr=req.get('http://localhost:9090/interacquisition')\n\t\t\tr = r.content\n\t\t\tr = json.loads(r)\n\t\t\tdelta_t = r[\"interacquisition\"]*60\n\t\t\t\n\t\t\tprint (\"[{}] Interacquisition time retrieved from the Room Catalog\".format(\n\t\t\t\tint(time.time()),\n\t\t\t))\n\t\t\t\n\t\t\ttime.sleep(delta_t)", "def get_calib_data(self):\n\n accel_offset_read = self.con.receive(registers.ACCEL_OFFSET_X_LSB_ADDR, 6)\n accel_offset_read_x = (accel_offset_read[1] << 8) | accel_offset_read[\n 0] # Combine MSB and LSB registers into one decimal\n accel_offset_read_y = (accel_offset_read[3] << 8) | accel_offset_read[\n 2] # Combine MSB and LSB registers into one decimal\n accel_offset_read_z = (accel_offset_read[5] << 8) | accel_offset_read[\n 4] # Combine MSB and LSB registers into one decimal\n\n accel_radius_read = self.con.receive(registers.ACCEL_RADIUS_LSB_ADDR, 2)\n accel_radius_read_value = (accel_radius_read[1] << 8) | accel_radius_read[0]\n\n mag_offset_read = self.con.receive(registers.MAG_OFFSET_X_LSB_ADDR, 6)\n mag_offset_read_x = (mag_offset_read[1] << 8) | mag_offset_read[\n 0] # Combine MSB and LSB registers into one decimal\n mag_offset_read_y = (mag_offset_read[3] << 8) | mag_offset_read[\n 2] # Combine MSB and LSB registers into one decimal\n mag_offset_read_z = (mag_offset_read[5] << 8) | mag_offset_read[\n 4] # Combine MSB and LSB registers into one decimal\n\n mag_radius_read = self.con.receive(registers.MAG_RADIUS_LSB_ADDR, 2)\n mag_radius_read_value = (mag_radius_read[1] << 8) | mag_radius_read[0]\n\n gyro_offset_read = self.con.receive(registers.GYRO_OFFSET_X_LSB_ADDR, 6)\n gyro_offset_read_x = (gyro_offset_read[1] << 8) | gyro_offset_read[\n 0] # Combine MSB and LSB registers into one decimal\n gyro_offset_read_y = (gyro_offset_read[3] << 8) | gyro_offset_read[\n 2] # Combine MSB and LSB registers into one decimal\n gyro_offset_read_z = (gyro_offset_read[5] << 8) | gyro_offset_read[\n 4] # Combine MSB and LSB registers into one decimal\n\n calib_data = {'accel_offset': {'x': accel_offset_read_x, 'y': accel_offset_read_y, 'z': accel_offset_read_z}, 'accel_radius': accel_radius_read_value,\n 'mag_offset': {'x': mag_offset_read_x, 'y': mag_offset_read_y, 'z': mag_offset_read_z}, 'mag_radius': mag_radius_read_value,\n 'gyro_offset': {'x': gyro_offset_read_x, 'y': gyro_offset_read_y, 'z': gyro_offset_read_z}}\n\n return calib_data", "def read_values(self):\n temp, acc, gyro = self.read_ag_data()\n tempc = lsm9ds1.TEMPC_0 + temp * lsm9ds1.TEMP_SENSOR_SCALE\n tempf = (tempc * 9/5) + 32\n acc = [c * 
lsm9ds1.ACC_SENSOR_SCALE for c in acc]\n gyro = [g * lsm9ds1.DPS_SENSOR_SCALE for g in gyro]\n return tempf, acc, gyro", "def get_accel_data(self):\n x = self.read_i2c_word(self.ACCEL_XOUT0)\n y = self.read_i2c_word(self.ACCEL_YOUT0)\n z = self.read_i2c_word(self.ACCEL_ZOUT0)\n\n accel_scale_modifier = None\n accel_range = self.read_accel_range(True)\n\n if accel_range == self.ACCEL_RANGE_2G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n elif accel_range == self.ACCEL_RANGE_4G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_4G\n elif accel_range == self.ACCEL_RANGE_8G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_8G\n elif accel_range == self.ACCEL_RANGE_16G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_16G\n else:\n print(\"Unkown range - accel_scale_modifier set to self.ACCEL_SCALE_MODIFIER_2G\")\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n\n x = x / accel_scale_modifier\n y = y / accel_scale_modifier\n z = z / accel_scale_modifier\n\n x = x * self.GRAVITIY_MS2\n y = y * self.GRAVITIY_MS2\n z = z * self.GRAVITIY_MS2\n return [x, y, z]", "def startDataAcq(self):\r\n\t\tglobal payload, control, output_settings, serials, datfiles\r\n\t\t# INITIALIZE THE OUTPUT FOLDER STRUCTURE\r\n\t\tcheck_dir(output_settings['folder'], output_settings['cruise'], payload)\r\n\t\tconfirm_dir(output_settings['folder'], output_settings['cruise'])\r\n\t\t# FIND THE START TIME\r\n\t\toutput_settings['start_time'] = init_time()\r\n\t\t# PRINT THE START TIME\r\n\t\tprint_spacer()\r\n\t\tprint 'Local Time: ', time.ctime(output_settings['start_time'])\r\n\t\tprint 'UTC: ', time.asctime(time.gmtime(output_settings['start_time']))\r\n\t\t\r\n\t\t# LOOP THROUGH THE SCIENTIFIC PAYLOAD\r\n\t\tfor k in payload.keys():\r\n\t\t\ttry:\r\n\t\t\t\tif serials[k].isOpen():\r\n\t\t\t\t\tclose_serial(serials[k])\r\n\t\t\texcept KeyError:\r\n\t\t\t\tprint ' '\r\n\t\t\t\t# print 'Serial port connected to '+k+' was not previously open.'\r\n\t\t\t# open the serial port\r\n\t\t\tserials[k] = init_serial(payload[k])\r\n\t\t\tif serials[k].isOpen():\t\t\t\t\r\n\t\t\t\t# print the serial info\r\n\t\t\t\tprint 'Receiving data from '+k\r\n\t\t\t\t# initialize the data file\r\n\t\t\t\tdatfiles[k] = init_datafile(output_settings, payload[k])\r\n\t\t\t\t# read one line because the first one after opening a port is usually gibberish\r\n\t\t\t\tline = serials[k].readline()\r\n\t\t\telse: \r\n\t\t\t\tprint 'Unable to connect to serial port '+payload[k]['port']+' connected to '+k\r\n\t\t\t# pause get everything setup\r\n\t\t\ttime.sleep(1)\r\n\t\t# start the loop \r\n\t\tcontrol.combine()", "def read_data(self):\r\n\t\tdata0 = bus.read_byte(A1332_DEFAULT_ADDRESS)\r\n\t\tdata1 = bus.read_byte(A1332_DEFAULT_ADDRESS)\r\n\t\t\r\n\t\ttime.sleep(0.5)\r\n\t\t\r\n\t\t# Checking valid data\r\n\t\twhile (data0 == 0) and (data1 == 0) :\r\n\t\t\tdata0 = bus.read_byte(A1332_DEFAULT_ADDRESS)\r\n\t\t\tdata1 = bus.read_byte(A1332_DEFAULT_ADDRESS)\r\n\t\t\r\n\t\t# Convert the data to 12-bits\r\n\t\traw_adc = ((data0 & 0x0F) * 256.0) + data1\r\n\t\tangle = (raw_adc / 4096.0) * 360.0\r\n\t\t\r\n\t\treturn {'a' : angle}", "def data_sample(CI, CC, thermal, calibration, health, driver_monitor, gps_location,\n poller, cal_status, cal_perc, overtemp, free_space, low_battery,\n driver_status, geofence, state, mismatch_counter, params):\n\n # Update carstate from CAN and create events\n CS = CI.update(CC)\n events = list(CS.events)\n enabled = isEnabled(state)\n\n # Receive from sockets\n td = None\n cal = None\n hh = None\n dm = None\n 
gps = None\n\n for socket, event in poller.poll(0):\n if socket is thermal:\n td = messaging.recv_one(socket)\n elif socket is calibration:\n cal = messaging.recv_one(socket)\n elif socket is health:\n hh = messaging.recv_one(socket)\n elif socket is driver_monitor:\n dm = messaging.recv_one(socket)\n elif socket is gps_location:\n gps = messaging.recv_one(socket)\n\n if td is not None:\n overtemp = td.thermal.thermalStatus >= ThermalStatus.red\n free_space = td.thermal.freeSpace < 0.15 # under 15% of space free no enable allowed\n low_battery = td.thermal.batteryPercent < 1 # at zero percent battery, OP should not be allowed\n\n # Create events for battery, temperature and disk space\n if low_battery:\n events.append(create_event('lowBattery', [ET.NO_ENTRY, ET.SOFT_DISABLE]))\n if overtemp:\n events.append(create_event('overheat', [ET.NO_ENTRY, ET.SOFT_DISABLE]))\n if free_space:\n events.append(create_event('outOfSpace', [ET.NO_ENTRY]))\n\n # Handle calibration\n if cal is not None:\n cal_status = cal.liveCalibration.calStatus\n cal_perc = cal.liveCalibration.calPerc\n\n if cal_status != Calibration.CALIBRATED:\n if cal_status == Calibration.UNCALIBRATED:\n events.append(create_event('calibrationIncomplete', [ET.NO_ENTRY, ET.SOFT_DISABLE, ET.PERMANENT]))\n else:\n events.append(create_event('calibrationInvalid', [ET.NO_ENTRY, ET.SOFT_DISABLE]))\n\n # When the panda and controlsd do not agree on controls_allowed\n # we want to disengage openpilot. However the status from the panda goes through\n # another socket than the CAN messages, therefore one can arrive earlier than the other.\n # Therefore we allow a mismatch for two samples, then we trigger the disengagement.\n if not enabled:\n mismatch_counter = 0\n\n if hh is not None:\n controls_allowed = hh.health.controlsAllowed\n if not controls_allowed and enabled:\n mismatch_counter += 1\n if mismatch_counter >= 2:\n events.append(create_event('controlsMismatch', [ET.IMMEDIATE_DISABLE]))\n\n # Driver monitoring\n if dm is not None:\n driver_status.get_pose(dm.driverMonitoring, params)\n\n # Geofence\n if geofence is not None and gps is not None:\n geofence.update_geofence_status(gps.gpsLocationExternal, params)\n if geofence is not None and not geofence.in_geofence:\n events.append(create_event('geofence', [ET.NO_ENTRY, ET.WARNING]))\n\n return CS, events, cal_status, cal_perc, overtemp, free_space, low_battery, mismatch_counter", "def get_sensor_data(self):\n # Initialize ROS msgs\n imu_raw_msg = Imu()\n imu_msg = Imu()\n mag_msg = MagneticField()\n temp_msg = Temperature()\n\n # read from sensor\n buf = self.con.receive(registers.BNO055_ACCEL_DATA_X_LSB_ADDR, 45)\n # Publish raw data\n imu_raw_msg.header.stamp = self.node.get_clock().now().to_msg()\n imu_raw_msg.header.frame_id = self.param.frame_id.value\n # TODO: do headers need sequence counters now?\n # imu_raw_msg.header.seq = seq\n\n # TODO: make this an option to publish?\n imu_raw_msg.orientation_covariance = [\n self.param.variance_orientation.value[0], 0.0, 0.0,\n 0.0, self.param.variance_orientation.value[1], 0.0,\n 0.0, 0.0, self.param.variance_orientation.value[2]\n ]\n\n imu_raw_msg.linear_acceleration.x = \\\n self.unpackBytesToFloat(buf[0], buf[1]) / self.param.acc_factor.value\n imu_raw_msg.linear_acceleration.y = \\\n self.unpackBytesToFloat(buf[2], buf[3]) / self.param.acc_factor.value\n imu_raw_msg.linear_acceleration.z = \\\n self.unpackBytesToFloat(buf[4], buf[5]) / self.param.acc_factor.value\n imu_raw_msg.linear_acceleration_covariance = [\n 
self.param.variance_acc.value[0], 0.0, 0.0,\n 0.0, self.param.variance_acc.value[1], 0.0,\n 0.0, 0.0, self.param.variance_acc.value[2]\n ]\n imu_raw_msg.angular_velocity.x = \\\n self.unpackBytesToFloat(buf[12], buf[13]) / self.param.gyr_factor.value\n imu_raw_msg.angular_velocity.y = \\\n self.unpackBytesToFloat(buf[14], buf[15]) / self.param.gyr_factor.value\n imu_raw_msg.angular_velocity.z = \\\n self.unpackBytesToFloat(buf[16], buf[17]) / self.param.gyr_factor.value\n imu_raw_msg.angular_velocity_covariance = [\n self.param.variance_angular_vel.value[0], 0.0, 0.0,\n 0.0, self.param.variance_angular_vel.value[1], 0.0,\n 0.0, 0.0, self.param.variance_angular_vel.value[2]\n ]\n # node.get_logger().info('Publishing imu message')\n self.pub_imu_raw.publish(imu_raw_msg)\n\n # TODO: make this an option to publish?\n # Publish filtered data\n imu_msg.header.stamp = self.node.get_clock().now().to_msg()\n imu_msg.header.frame_id = self.param.frame_id.value\n\n q = Quaternion()\n # imu_msg.header.seq = seq\n q.w = self.unpackBytesToFloat(buf[24], buf[25])\n q.x = self.unpackBytesToFloat(buf[26], buf[27])\n q.y = self.unpackBytesToFloat(buf[28], buf[29])\n q.z = self.unpackBytesToFloat(buf[30], buf[31])\n # TODO(flynneva): replace with standard normalize() function\n # normalize\n norm = sqrt(q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w)\n imu_msg.orientation.x = q.x / norm\n imu_msg.orientation.y = q.y / norm\n imu_msg.orientation.z = q.z / norm\n imu_msg.orientation.w = q.w / norm\n\n imu_msg.orientation_covariance = imu_raw_msg.orientation_covariance\n\n imu_msg.linear_acceleration.x = \\\n self.unpackBytesToFloat(buf[32], buf[33]) / self.param.acc_factor.value\n imu_msg.linear_acceleration.y = \\\n self.unpackBytesToFloat(buf[34], buf[35]) / self.param.acc_factor.value\n imu_msg.linear_acceleration.z = \\\n self.unpackBytesToFloat(buf[36], buf[37]) / self.param.acc_factor.value\n imu_msg.linear_acceleration_covariance = imu_raw_msg.linear_acceleration_covariance\n imu_msg.angular_velocity.x = \\\n self.unpackBytesToFloat(buf[12], buf[13]) / self.param.gyr_factor.value\n imu_msg.angular_velocity.y = \\\n self.unpackBytesToFloat(buf[14], buf[15]) / self.param.gyr_factor.value\n imu_msg.angular_velocity.z = \\\n self.unpackBytesToFloat(buf[16], buf[17]) / self.param.gyr_factor.value\n imu_msg.angular_velocity_covariance = imu_raw_msg.angular_velocity_covariance\n self.pub_imu.publish(imu_msg)\n\n # Publish magnetometer data\n mag_msg.header.stamp = self.node.get_clock().now().to_msg()\n mag_msg.header.frame_id = self.param.frame_id.value\n # mag_msg.header.seq = seq\n mag_msg.magnetic_field.x = \\\n self.unpackBytesToFloat(buf[6], buf[7]) / self.param.mag_factor.value\n mag_msg.magnetic_field.y = \\\n self.unpackBytesToFloat(buf[8], buf[9]) / self.param.mag_factor.value\n mag_msg.magnetic_field.z = \\\n self.unpackBytesToFloat(buf[10], buf[11]) / self.param.mag_factor.value\n mag_msg.magnetic_field_covariance = [\n self.param.variance_mag.value[0], 0.0, 0.0,\n 0.0, self.param.variance_mag.value[1], 0.0,\n 0.0, 0.0, self.param.variance_mag.value[2]\n ]\n self.pub_mag.publish(mag_msg)\n\n # Publish temperature\n temp_msg.header.stamp = self.node.get_clock().now().to_msg()\n temp_msg.header.frame_id = self.param.frame_id.value\n # temp_msg.header.seq = seq\n temp_msg.temperature = float(buf[44])\n self.pub_temp.publish(temp_msg)", "def _start_device(self):\r\n enabled = [1,1,1,0]\r\n self._data = [np.empty(self._samples,dtype=np.int16) for i in range(3)]\r\n self._data_buffer = [x.ctypes for x in 
self._data]\r\n self._timebase = self.get_timebase(self._sampling_time)\r\n self.v_rangeAPI = [7,7,7,0] # 5V range\r\n self.v_range = [CHANNEL_RANGE[i][\"rangeV\"] for i in self.v_rangeAPI]\r\n with self._driver_lock:\r\n for i,v,en in zip(range(4),self.v_rangeAPI,enabled): # three active channels\r\n m = self._lib.ps2000aSetChannel(self._handle,\r\n c_int32(i), # channel\r\n c_int16(en), # enabled\r\n c_int32(1), # DC coupling\r\n c_int32(v), # voltage range (API value)\r\n c_float(0)) # 0V offset\r\n check_result(m)\r\n\r\n if en:\r\n m = self._lib.ps2000aSetDataBuffer(self._handle,\r\n c_int32(i), # channel\r\n self._data_buffer[i],\r\n c_int32(self._samples),\r\n c_uint32(0), # segment index\r\n c_int32(0)) # ratio mode\r\n check_result(m)\r\n\r\n threshold_v = 3\r\n threshold_adc = int(threshold_v * MAX_EXT / self.v_range[2])\r\n m = self._lib.ps2000aSetSimpleTrigger(self._handle,\r\n c_int16(1), # enabled\r\n c_int32(2), # Trigger off Channel C\r\n c_int16(threshold_adc),\r\n c_int32(2), # direction = rising\r\n c_uint32(0), # no delay\r\n c_int16(2000)) # autotrigger after 2 seconds if no trigger occurs\r\n check_result(m)\r\n\r\n # Send AWG Info to Picoscope\r\n delta_phase = c_uint32()\r\n output_freq = 1/self._sampling_duration\r\n # output_freq = 1E6\r\n m = self._lib.ps2000aSigGenFrequencyToPhase(self._handle,\r\n c_double(output_freq),\r\n c_int32(0),\r\n c_uint32(len(self._waveform)),\r\n byref(delta_phase))\r\n check_result(m)\r\n delta_phase = int(delta_phase.value)\r\n offset_voltage = 1\r\n pk2pk = 2\r\n # output_freq = 1E6\r\n # wave_type = {'sine':0,'square':1,'triangle':2,'DC':3,\r\n # 'rising sawtooth':4,'falling sawtooth':5,'sin(x)/x':6,\r\n # 'Gaussian':7,'half-sine':8}\r\n waveformPtr = self._waveform.ctypes\r\n trigger_type = 2 # siggen gate high\r\n trigger_source = 4 # software trigger\r\n m = self._lib.ps2000aSetSigGenArbitrary(self._handle,\r\n c_int32(int(offset_voltage*1E6)), \r\n c_uint32(int(pk2pk*1E6)),\r\n c_uint32(delta_phase), # start delta phase\r\n c_uint32(delta_phase), # stop delta phase\r\n c_uint32(0), # delta phase increment\r\n c_uint32(0), # dwell count\r\n waveformPtr, # arbitrary waveform\r\n c_int32(self._samples), # arbitrary waveform size\r\n c_int32(0), # sweep type for delta phase\r\n c_int32(0), # extra operations\r\n c_int32(0), # index mode\r\n c_uint32(1), # shots\r\n c_uint32(0), # sweeps\r\n c_int32(trigger_type),\r\n c_int32(trigger_source),\r\n c_int16(0)) # extIn threshold\r\n check_result(m)\r\n # m = self._lib.ps2000aSetSigGenBuiltIn(self._handle,\r\n # c_int32(int(offset_voltage*1E6)), # offset voltage\r\n # c_uint32(int(pk2pk*1E6)),# peak to peak voltage\r\n # c_int32(wave_type['square']), # wave type\r\n # c_float(output_freq), # start frequency\r\n # c_float(output_freq), # stop frequency\r\n # c_float(0), # increment\r\n # c_float(0), # dwell count\r\n # c_int32(0), # sweep type\r\n # c_int32(0), # operation\r\n # c_uint32(4), # shots\r\n # c_uint32(0), # sweeps\r\n # c_int32(trigger_type), \r\n # c_int32(trigger_source),\r\n # c_int16(0)) # extIn threshold\r\n # check_result(m)\r\n\r\n # for i in enabled:\r\n # if i:\r\n # m = self._lib.ps2000aSetDataBuffer(self._handle,\r\n # c_int32(i), # channel\r\n # self._data_buffer[i],\r\n # c_int32(self._samples),\r\n # c_uint32(0), # segment index\r\n # c_int32(0)) # ratio mode\r\n # check_result(m)\r\n\r\n self._save_thread = Thread(target=self.save,args=(self._save_queue,))\r\n self._save_thread.daemon = True\r\n self._save_thread.start()\r\n\r\n self._process_thread = 
Thread(target=self.process,args=(self._process_queue,self._save_queue))\r\n self._process_thread.daemon = True\r\n self._process_thread.start()\r\n\r\n self._collect_thread = Thread(target=self.run_loop,args=(self._process_queue,))\r\n self._collect_thread.daemon = True\r\n self._collect_thread.start()\r\n\r\n return True", "def _read_calibration_data(self):\n #Declare global variables.\n global calT1\n global calT2\n global calT3\n global calP1\n global calP2\n global calP3\n global calP4\n global calP5\n global calP6\n global calP7\n global calP8\n global calP9\n global calP10\n global calH1\n global calH2\n global calH3\n global calH4\n global calH5\n global calH6\n global calH7\n global calGH1\n global calGH2\n global calGH3\n global calResHeatRange\n global calResHeatVal\n global calRangeSwErr\n\n #Temperature calibration.\n calT1 = self._read_2bytes_as_ushort_lsbfirst(self.BME680_T1_LSB_REG)\n calT2 = self._read_2bytes_as_short_lsbfirst(self.BME680_T2_LSB_REG)\n calT3 = self._read_register_1sbyte(self.BME680_T3_REG)\n\n #Pressure calibration.\n calP1 = self._read_2bytes_as_ushort_lsbfirst(self.BME680_P1_LSB_REG)\n calP2 = self._read_2bytes_as_short_lsbfirst(self.BME680_P2_LSB_REG)\n calP3 = self._read_register_1sbyte(self.BME680_P3_REG)\n calP4 = self._read_2bytes_as_short_lsbfirst(self.BME680_P4_LSB_REG)\n calP5 = self._read_2bytes_as_short_lsbfirst(self.BME680_P5_LSB_REG)\n calP6 = self._read_register_1sbyte(self.BME680_P6_REG)\n calP7 = self._read_register_1sbyte(self.BME680_P7_REG)\n calP8 = self._read_2bytes_as_short_lsbfirst(self.BME680_P8_LSB_REG)\n calP9 = self._read_2bytes_as_short_lsbfirst(self.BME680_P9_LSB_REG)\n calP10 = self._read_register_1ubyte(self.BME680_P10_REG)\n\n #Humidity calibration.\n calH1 = self._read_register_1ubyte(self.BME680_H1_MSB_REG) << 4 | (self._read_register_1ubyte(self.BME680_H1_LSB_REG) & 0x0F)\n calH2 = self._read_register_1ubyte(self.BME680_H2_MSB_REG) << 4 | ((self._read_register_1ubyte(self.BME680_H2_LSB_REG)) >> 4)\n calH3 = self._read_register_1sbyte(self.BME680_H3_REG)\n calH4 = self._read_register_1sbyte(self.BME680_H4_REG)\n calH5 = self._read_register_1sbyte(self.BME680_H5_REG)\n calH6 = self._read_register_1ubyte(self.BME680_H6_REG)\n calH7 = self._read_register_1sbyte(self.BME680_H7_REG)\n\n #Gas calibration.\n calGH1 = self._read_register_1sbyte(self.BME680_GH1_REG)\n calGH2 = self._read_2bytes_as_short_lsbfirst(self.BME680_GH2_LSB_REG)\n calGH3 = self._read_register_1sbyte(self.BME680_GH3_REG)\n\n #Heat calibration.\n calResHeatRange = (self._read_register_1ubyte(self.BME680_RES_HEAT_RANGE) & 0x30) / 16\n calResHeatVal = self._read_register_1sbyte(self.BME680_RES_HEAT_VAL)\n calRangeSwErr = (self._read_register_1sbyte(self.BME680_RANGE_SW_ERR) & 0xF0) / 16", "def _loadData(self):\n self.d = read_ac_data.read_ac_data_wrapper(self.sc_id, self.date,\n dType='10Hz')\n return", "def read_ag_data(self):\n data = self.ag.read_bytes(Register.OUT_TEMP_L, 14)\n temp = lsm9ds1.to_int16(data[0:2])\n gyro = lsm9ds1.to_vector_left_to_right_hand_rule(data[2:8])\n acc = lsm9ds1.to_vector_left_to_right_hand_rule(data[8:14])\n return temp, acc, gyro", "def _obtain_data(self):\n (self.data_df, self.column_df, self.station_name, self.log_file, self.station_lat, self.station_lon,\n self.station_elev, self.ws_anemometer_height, self.missing_fill_value, self.script_mode,\n self.auto_mode, self.fill_mode, self.metadata_mode, self.generate_bokeh, self.metadata_df,\n metadata_series) = input_functions.obtain_data(self.config_path, self.metadata_path)\n\n if 
self.script_mode == 1: # correcting data\n self.mc_iterations = 1000 # Number of iters for MC simulation of thornton running solar radiation gen\n else:\n self.mc_iterations = 50 # if we're not correcting data then only do a few iterations to save time\n\n print(\"\\nSystem: Raw data successfully extracted from station file.\")\n\n # Extract individual variables from data frame back into to numpy arrays.\n self.data_year = np.array(self.data_df.year)\n self.data_month = np.array(self.data_df.month)\n self.data_day = np.array(self.data_df.day)\n self.data_tavg = np.array(self.data_df.tavg)\n self.data_tmax = np.array(self.data_df.tmax)\n self.data_tmin = np.array(self.data_df.tmin)\n self.data_tdew = np.array(self.data_df.tdew)\n self.data_ea = np.array(self.data_df.ea)\n self.data_rhavg = np.array(self.data_df.rhavg)\n self.data_rhmax = np.array(self.data_df.rhmax)\n self.data_rhmin = np.array(self.data_df.rhmin)\n self.data_rs = np.array(self.data_df.rs)\n self.data_ws = np.array(self.data_df.ws)\n self.data_precip = np.array(self.data_df.precip)\n\n self.output_file_path = \"correction_files/\" + self.station_name + \"_output\" + \".xlsx\"", "def getData(self):\n self.ser.write(b'g')\n readString = self.ser.readline()\n print(readString)\n readString = readString.decode(\"utf-8\")\n splittedString = readString.split('\\t')\n for i, num in enumerate(splittedString):\n try:\n splittedString[i] = int(float(num))\n except ValueError:\n pass\n self.accString.set('Accleration\\nX: %.5f\\nY: %.5f\\nZ: %.5f' %\n (splittedString[0], splittedString[1],\n splittedString[2]))\n self.logFile.write(readString)\n self.comJob = root.after(10, self.getData)", "def L3G_acquisition(add):\n\n # control register\n CTRL_REG1 = 0x20\n CTRL_REG4 = 0x23\n LOW_ODR = 0x39\n FIFO_CTRL = 0x2e\n # output register\n OUT_X_L = 0x28\n OUT_X_H = 0x29\n OUT_Y_L = 0x2a\n OUT_Y_H = 0x2b\n OUT_Z_L = 0x2c\n OUT_Z_H = 0x2d\n\n # low odr mode, 50Hz, 2000 dps full scale\n bus.write_byte_data(add, CTRL_REG1, 0b10001111)\n bus.write_byte_data(add, CTRL_REG4, 0b00110000)\n bus.write_byte_data(add, LOW_ODR, 0b00000001)\n bus.write_byte_data(add, FIFO_CTRL, 0b01000000)\n\n # accelerator accumulate\n while True:\n uint16_gx = (bus.read_byte_data(add, OUT_X_H) << 8) + \\\n bus.read_byte_data(add, OUT_X_L)\n uint16_gy = (bus.read_byte_data(add, OUT_Y_H) << 8) + \\\n bus.read_byte_data(add, OUT_Y_L)\n uint16_gz = (bus.read_byte_data(add, OUT_Z_H) << 8) + \\\n bus.read_byte_data(add, OUT_Z_L)\n\n gx = twos_comp(uint16_gx, 16)\n gy = twos_comp(uint16_gy, 16)\n gz = twos_comp(uint16_gz, 16)\n\n yield [gx, gy, gz]", "def read(self):\n try:\n cmd = 'SAMP:COUN 1' \n self.handle.write(cmd) #one sample per trigger\n self.handle.write('TRIG:SOUR BUS') #triggered by command\n self.handle.write('TRIG:COUN 1') #one trigger to return to wait for trg\n self.handle.write('INIT:IMM') #DVM to \"wait for trigger\" \n self.handle.write('*TRG')\n startTime = time.time()\n while True: #wait until measuring flag goes to 0\n try:\n measured = self.handle.ask(\"DATA:POIN?\")\n measured = measured.strip() #remove CR \n measured = int(measured) #convert to number\n if measured == 1: #final number of samples achieved\n break;\n except Exception:\n print('Dvm34411:read() polling failed !')\n raise\n \n if time.time() - startTime > self.timeout:\n print('Dvm34411:read() timeout !')\n return False\n \n time.sleep(1) \n reading = self.handle.ask('R? 
1;') #definite-Length block format\n except Exception:\n print('Dvm34411.read() failed !')\n raise\n if reading[0] != '#':\n print('Dvm34411.read() DLB format error - # expected !')\n return False\n digits = int(reading[1])\n reading = reading[2 + digits:]\n rdg = float(reading)\n return rdg", "def get_data(self):\n self.dev.write(1, 'A0')\n digit1, digit2 = self.dev.read(0x81, 64)[:2]\n # Save the data as voltage between 0.0 and 5.0\n self.data0.append((digit1 + 256*digit2)*5.0/1024)", "def launch_devices(self):\n self.data[0], temp = alghoritm.temperature(self.data[0], self.set_thermostat, 0) # get value\n HC35_3S.launch(self.data_path, self.samples_size, temp) # set it via device\n\n self.data[1], humidi = alghoritm.humidity(self.data[1], self.set_humidifier, 0)\n humidifier.launch(self.data_path, self.samples_size, humidi)\n\n self.data[2], moistu = alghoritm.moisture(self.data[2], self.set_sprinklers, 0)\n HUNTER.launch(self.data_path, self.samples_size, moistu)\n\n self.data[3], o2 = alghoritm.o2(self.data[3], self.set_ventilation, 0)\n ventilation.launch_o2(self.data_path, self.samples_size, o2)\n\n self.data[4], co2 = alghoritm.co2(self.data[4], self.set_ventilation, 0)\n ventilation.launch_co2(self.data_path, self.samples_size, co2)", "def timer_tick(self, *args):\n # Generate a new number and increment the tick count\n # x, y, z=accel.read()\n\n # ADXL345 address, 0x53(83)\n\n # Select bandwidth rate register, 0x2C(44)\n\n #\t\t0x0A(10)\tNormal mode, Output data rate = 100 Hz\n\n bus.write_byte_data(0x53, 0x2C, 0x0A)\n\n # ADXL345 address, 0x53(83)\n\n # Select power control register, 0x2D(45)\n\n #\t\t0x08(08)\tAuto Sleep disable\n\n bus.write_byte_data(0x53, 0x2D, 0x08)\n\n # ADXL345 address, 0x53(83)\n\n # Select data format register, 0x31(49)\n\n #\t\t0x08(08)\tSelf test disabled, 4-wire interface\n\n #\t\t\t\t\tFull resolution, Range = +/-2g\n\n bus.write_byte_data(0x53, 0x31, 0x08)\n\n # time.sleep(0.5)\n\n # ADXL345 address, 0x53(83)\n\n # Read data back from 0x32(50), 2 bytes\n\n # X-Axis LSB, X-Axis MSB\n\n data0 = bus.read_byte_data(0x53, 0x32)\n\n data1 = bus.read_byte_data(0x53, 0x33)\n\n # Convert the data to 10-bits\n\n xAccl = ((data1 & 0x03) * 256) + data0\n\n if xAccl > 511:\n xAccl -= 1024\n\n # ADXL345 address, 0x53(83)\n\n # Read data back from 0x34(52), 2 bytes\n\n # Y-Axis LSB, Y-Axis MSB\n\n data0 = bus.read_byte_data(0x53, 0x34)\n\n data1 = bus.read_byte_data(0x53, 0x35)\n\n # Convert the data to 10-bits\n\n yAccl = ((data1 & 0x03) * 256) + data0\n\n if yAccl > 511:\n yAccl -= 1024\n\n # ADXL345 address, 0x53(83)\n\n # Read data back from 0x36(54), 2 bytes\n\n # Z-Axis LSB, Z-Axis MSB\n\n data0 = bus.read_byte_data(0x53, 0x36)\n\n data1 = bus.read_byte_data(0x53, 0x37)\n\n # Convert the data to 10-bits\n\n zAccl = ((data1 & 0x03) * 256) + data0\n\n if zAccl > 511:\n zAccl -= 1024\n\n # Output data to screen\n\n # print \"Acceleration in X-Axis : %d\" %xAccl\n\n # print \"Acceleration in Y-Axis : %d\" %yAccl\n\n # print \"Acceleration in Z-Axis : %d\" %zAccl\n new_val = xAccl\n self.num_ticks += 1\n\n # grab the existing data, truncate it, and append the new point.\n # This isn't the most efficient thing in the world but it works.\n cur_data = self.viewer.data\n new_data = np.hstack((cur_data[-self.max_num_points + 1:], [new_val]))\n new_index = np.arange(self.num_ticks - len(new_data) + 1,\n self.num_ticks + 0.01)\n\n self.viewer.index = new_index\n self.viewer.data = new_data\n return", "def AcquiredData (self, arguments=None) 
:\n\t\tself.OODriver.Wrapper_getSpectrum(self.wrapperHandle,self.spectrometerIndex,self.bufferHandle)\n\t\t\n\t\tif self.OODriver.Wrapper_isSaturated(self.wrapperHandle,self.spectrometerIndex) :\n\t\t\tprint \"Warning: OcenOptics spectrometer is saturated!\"\n\t\t\t\n\t\ttry : return self.buffer[self.spectral_interval]\n\t\texcept AttributeError : return self.buffer", "def recive_data(self):\n # read all available data\n while self.ser.inWaiting() > self.INPUT_DATA_SIZE+1:\n data = array.array('c')\n # search the header\n data.append(self.ser.read(1))\n while data[0] != chr(1):\n data[0] = self.ser.read(1)\n \n # wait for all available data\n while self.ser.inWaiting() < (self.INPUT_DATA_SIZE-1):\n time.sleep(0.03);\n \n # recives data\n data = self.ser.read(self.INPUT_DATA_SIZE-1)\n \n # prove if you want graphical data\n if self.pushButton_monitor.isChecked():\n # decodes the data\n t = struct.unpack('I', data[3]+data[2]+data[1]+data[0])\n r = struct.unpack('f', data[4]+data[5]+data[6]+data[7])\n x0 = struct.unpack('f', data[8]+data[9]+data[10]+data[11])\n x1 = struct.unpack('f', data[12]+data[13]+data[14]+data[15])\n u = struct.unpack('f', data[16]+data[17]+data[18]+data[19])\n \n self.time = t[0]*25e-9\n \n # prepare the string output\n aux_str = \" t = \"+str(self.time)+\"\\t\"\n aux_str += \" r = \"+str(r[0])+\"\\t\"\n aux_str += \" u = \"+str(u[0])+\"\\t\"\n aux_str += \" x1 = \"+str(x1[0])+\"\\t\"\n aux_str += \" x0 = \"+str(x0[0])+\"\\n\"\n # print string output\n self.textBrowser.insertPlainText(aux_str)\n \n # append data to the arrays\n self.graf_t.append(self.time)\n self.graf_r.append(r[0])\n self.graf_x0.append(x0[0])\n self.graf_x1.append(x1[0])\n self.graf_u.append(u[0])\n \n # remove one value if the arrays have maximum length\n if self.graf_t.buffer_info()[1] >= NUM_SAMPLES:\n self.graf_t.pop(0)\n self.graf_r.pop(0)\n self.graf_x0.pop(0)\n self.graf_x1.pop(0)\n self.graf_u.pop(0)\n \n # reload number of samples lavel\n self.label_samples_value.setText(str(self.graf_t.buffer_info()[1]))\n # reload number of waiting chars in serial rx buffer\n self.label_rx_buff_value.setText(str(self.ser.inWaiting()))\n\n # reload mutex area\n self.updated_data = 1\n \n # prove if there are available id's\n if (self.actionPC_Monitor.isChecked() and data[20] == chr(2)):\n # if it is true, looks how much id's\n i = struct.unpack('B', data[21])\n\n if i[0] < STACK_SIZE:\n for z in range(i[0]):\n new_device = struct.unpack('B', data[z+22])\n new_string = str(new_device[0])\n \n llista = self.listWidget_link.findItems(new_string, QtCore.Qt.MatchExactly)\n if len(llista) == 0:\n self.listWidget_link.addItem(new_string)", "def read_data(self):\n self.data = self.i2c.readfrom_mem(accel_address, x_data, 6)\n data_xyz = []\n for i in range(3):\n value = (self.data[2*i + 1] << 8) | self.data[2*i]\n data_xyz.append(self.get_acceleration(value) - self.offset[i])\n data_xyz.append(utime.ticks_ms() - self.time)\n return data_xyz", "def getAccelerometer(self):\n cmd = 'A'\n acc = [-1,-1,-1]\n out = self.getData(cmd)\n out = str(out, 'utf-8')\n if self.debug:\n print(out)\n isStart = False\n if out[0] == 'a':\n j = 0\n for i in range(len(out)):\n if isStart:\n if out[i] == ',':\n acc[j] = int(data)\n j = j + 1\n isStart = False\n else:\n data=data+out[i]\n if out[i] == ',':\n isStart = True\n data = ''\n acc[j] = int(data)\n return acc", "def acq(self, session, params=None):\n pm = Pacemaker(1, quantize=True)\n self.take_data = True\n while self.take_data:\n pm.sleep()\n\n dp = int(self.daq_port)\n adr = 
\"/iolinkmaster/port[{}]/iolinkdevice/pdin/getdata\".format(dp)\n url = 'http://{}'.format(self.ip_address)\n\n r = requests.post(url, json={\"code\": \"request\", \"cid\": -1, \"adr\": adr})\n value = r.json()['data']['value']\n\n flow_gpm, temp_f = extract(value) # units [gallons/minute], [F]\n flow = flow_gpm * 3.785411784 # liters/minute\n flow = round(flow, 1)\n temp = (temp_f - 32) * (5 / 9) # Celsius\n temp = round(temp, 1)\n now = time.time()\n\n data = {'block_name': 'flowmeter',\n 'timestamp': now,\n 'data': {'flow': flow,\n 'temperature': temp}\n }\n\n self.agent.publish_to_feed('flowmeter', data)\n\n session.data = {\"timestamp\": now,\n \"fields\": {}}\n\n session.data['fields']['flow'] = flow\n session.data['fields']['temperature'] = temp\n\n if params['test_mode']:\n break\n\n return True, 'Acquisition exited cleanly.'", "def timer_tick(self, *args):\n # Generate a new number and increment the tick count\n\t #x, y, z=accel.read()\n\n # ADXL345 address, 0x53(83)\n # Select bandwidth rate register, 0x2C(44)\n #\t\t0x0A(10)\tNormal mode, Output data rate = 100 Hz\n bus.write_byte_data(0x53, 0x2C, 0x0A)\n\n # ADXL345 address, 0x53(83)\n # Select power control register, 0x2D(45)\n #\t\t0x08(08)\tAuto Sleep disable\n bus.write_byte_data(0x53, 0x2D, 0x08)\n\n # ADXL345 address, 0x53(83)\n # Select data format register, 0x31(49)\n #\t\t0x08(08)\tSelf test disabled, 4-wire interface\n #\t\t\t\t\tFull resolution, Range = +/-2g\n\n bus.write_byte_data(0x53, 0x31, 0x08)\n # time.sleep(0.5)\n # ADXL345 address, 0x53(83)\n # Read data back from 0x32(50), 2 bytes\n # X-Axis LSB, X-Axis MSB\n\n data0 = bus.read_byte_data(0x53, 0x32)\n data1 = bus.read_byte_data(0x53, 0x33)\n\n # Convert the data to 10-bits\n xAccl = ((data1 & 0x03) * 256) + data0\n\n if xAccl > 511:\n xAccl -= 1024\n # ADXL345 address, 0x53(83)\n # Read data back from 0x34(52), 2 bytes\n # Y-Axis LSB, Y-Axis MSB\n data0 = bus.read_byte_data(0x53, 0x34)\n data1 = bus.read_byte_data(0x53, 0x35)\n\n # Convert the data to 10-bits\n yAccl = ((data1 & 0x03) * 256) + data0\n\n if yAccl > 511:\n yAccl -= 1024\n\n # ADXL345 address, 0x53(83)\n # Read data back from 0x36(54), 2 bytes\n # Z-Axis LSB, Z-Axis MSB\n data0 = bus.read_byte_data(0x53, 0x36)\n data1 = bus.read_byte_data(0x53, 0x37)\n\n # Convert the data to 10-bits\n zAccl = ((data1 & 0x03) * 256) + data0\n if zAccl > 511:\n zAccl -= 1024\n\n # Output data to screen\n # print \"Acceleration in X-Axis : %d\" %xAccl\n # print \"Acceleration in Y-Axis : %d\" %yAccl\n # print \"Acceleration in Z-Axis : %d\" %zAccl\n new_val = zAccl\n self.num_ticks += 1\n\n # grab the existing data, truncate it, and append the new point.\n # This isn't the most efficient thing in the world but it works.\n cur_data = self.viewer.data\n new_data = np.hstack((cur_data[-self.max_num_points+1:], [new_val]))\n new_index = np.arange(self.num_ticks - len(new_data) + 1,\n self.num_ticks + 0.01)\n\n self.viewer.index = new_index\n self.viewer.data = new_data\n return" ]
[ "0.66846615", "0.6139364", "0.61318266", "0.6100957", "0.59762603", "0.59737635", "0.5934688", "0.5893882", "0.5851181", "0.5845216", "0.58336294", "0.5833296", "0.5828705", "0.5823165", "0.57837117", "0.57669115", "0.57583517", "0.5741563", "0.57170486", "0.5711301", "0.5664764", "0.56034464", "0.5581813", "0.5572793", "0.55690175", "0.5567378", "0.5543615", "0.5528588", "0.5516873", "0.55128056" ]
0.6794902
0
read the LSM value for certain time and return the value and write to offset register, make sure the chip is leveled and calm lsm arduino library
def LSM_offset(add, timer_out=1000):
    # initialize
    timer = 0
    m_min = [32767, 32767, 32767]
    m_max = [-32768, -32768, -32768]
    # lsm reading
    lsm_result = LSM_acquisition(add)
    # loop to update min and max
    while timer <= timer_out:
        m = lsm_result.next()[3:6]
        m_min = [min(m_min[i], m[i]) for i in range(3)]
        m_max = [max(m_max[i], m[i]) for i in range(3)]
        timer += 1
    print [m_min, m_max]
    return [m_min, m_max]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_sensor(sensor: int, gpio_pin: int) -> (int, float, float, datetime):\r\n logging.debug('reading sensor')\r\n hum_rh, temp_c = Adafruit_DHT.read_retry(sensor, gpio_pin)\r\n if hum_rh is None or temp_c is None:\r\n logging.error(\"failed to read from the sensor\")\r\n return 1, 0, 0, datetime.now()\r\n logging.debug('sensor data: RH: {}, Tc: {}'.format(hum_rh, temp_c))\r\n return 0, hum_rh, temp_c, datetime.now()", "def LSM_acquisition(add):\n # control register\n CTRL0 = 0x1f # p.34, accelerator\n CTRL1 = 0x20\n CTRL2 = 0x21\n CTRL5 = 0x24 # p.36, magnetic\n CTRL6 = 0x25\n CTRL7 = 0x26\n FIFO_CTRL = 0x2e # p.40\n # accelerater\n OUT_X_L_A = 0x28\n OUT_X_H_A = 0x29\n OUT_Y_L_A = 0x2a\n OUT_Y_H_A = 0x2b\n OUT_Z_L_A = 0x2c\n OUT_Z_H_A = 0x2d\n # magentic\n OUT_X_L_M = 0x08\n OUT_X_H_M = 0x09\n OUT_Y_L_M = 0x0a\n OUT_Y_H_M = 0x0b\n OUT_Z_L_M = 0x0c\n OUT_Z_H_M = 0x0d\n\n # follow lsm303D arduino library\n # AFS = 0, +-2g scale\n bus.write_byte_data(add, CTRL2, 0x00)\n # 50 Hz AODR, all axis enable\n bus.write_byte_data(add, CTRL1, 0x57)\n # high resolution, 6.25Hz MODR\n bus.write_byte_data(add, CTRL5, 0x64)\n # +-4 gauss scale\n bus.write_byte_data(add, CTRL6, 0x20)\n # low power mode off, continuous conversion mode\n bus.write_byte_data(add, CTRL7, 0x00)\n # # FIFO mode\n # bus.write_byte_data(add, CTRL0, 0b01000000)\n # bus.write_byte_data(add, FIFO_CTRL, 0b01000000)\n # # accelerator with 12.5Hz, all axis enable\n # bus.write_byte_data(add, CTRL1, 0b00110111)\n # # magnetic 12.5Hz, high resolutn, temp en\n # bus.write_byte_data(add, CTRL5, 0b11100000)\n # # full scale range \\pm 12 gauss\n # bus.write_byte_data(add, CTRL6, 0b01101000)\n # # enable magnetic\n # bus.write_byte_data(add, CTRL7, 0x00)\n\n # accelerator accumulate\n while True:\n uint16_ax = (bus.read_byte_data(add, OUT_X_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_X_L_A)\n uint16_ay = (bus.read_byte_data(add, OUT_Y_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_Y_L_A)\n uint16_az = (bus.read_byte_data(add, OUT_Z_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_Z_L_A)\n\n uint16_mx = (bus.read_byte_data(add, OUT_X_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_X_L_M)\n uint16_my = (bus.read_byte_data(add, OUT_Y_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_Y_L_M)\n uint16_mz = (bus.read_byte_data(add, OUT_Z_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_Z_L_M)\n # accelerometer 12 bit left aligned\n # ax = twos_comp(uint16_ax>>4, 12)\n # ay = twos_comp(uint16_ay>>4, 12)\n # az = twos_comp(uint16_az>>4, 12)\n ax = twos_comp(uint16_ax, 16)\n ay = twos_comp(uint16_ay, 16)\n az = twos_comp(uint16_az, 16)\n\n mx = twos_comp(uint16_mx, 16)\n my = twos_comp(uint16_my, 16)\n mz = twos_comp(uint16_mz, 16)\n\n yield [ax, ay, az, mx, my, mz]", "def read(self):\n \n #self.lego_bus.write_byte(self.address, 0xF5)\n #time.sleep(0.2)\n block=0\n rawhumidity=0\n counter=0\n toRead=0\n while counter < 10 and toRead != 3:\n self.lego_bus.write_byte(self.address, 0xFE)\n self.lego_bus.write_byte(self.address, 0xF5)\n sleep(1)\n \n try:\n block = self.lego_bus.read_i2c_block_data(self.address, 0, 3)\n print (block)\n rawhumidity = (( block[0] << 8) | block[1])\n print (rawhumidity)\n except:\n print(\"Error inesperado:\", sys.exc_info()[0])\n counter +=1\n \n\n rh=999 # if invalid checksum return error value\n if (self.check_crc(rawhumidity, block[2]) == 0): #Verify the checksum \n #rawhumidity &= 0xFFFC\n print (rawhumidity)\n tempRH = rawhumidity / 65536.0\n rh = -6.0 + (125.0 * tempRH)\n return rh", "def read_level(self):\n addresse = 
0x48\n self.bus.write_byte(addresse,self.channel)\n value = self.bus.read_byte(addresse)\n time.sleep(1)\n volts = self.convert_volts(value,2)\n self.write_level(volts)\n alerteur = Alerteur()\n if volts < self.seuil_min:\n alerteur.add_alert(self.module_name, \"Batterie faible.\")\n else:\n alerteur.remove_alert(self.module_name)\n return volts", "def track_moisture_level():\n try:\n normal_level_init = 470\n low_level_init = 560\n\n global LIMIT_FLAG\n sensor_read = sensorData.read_moisture()\n generate_json.define_structure(\"moisture\", sensor_read)\n\n if sensor_read > low_level_init:\n if LIMIT_FLAG != 3:\n # When it is dry (Moisture Level Low)\n LIMIT_FLAG = 3\n blynk.notify('Moisture Level Low! Irrigation Needed')\n blynk.email('[email protected]', 'Alert: Moisture Level Low',\n 'Moisture Level Low! Irrigation Needed')\n logging_write()\n elif normal_level_init <= sensor_read <= low_level_init:\n if LIMIT_FLAG != 2:\n LIMIT_FLAG = 2\n logging_write()\n else:\n if LIMIT_FLAG != 1:\n LIMIT_FLAG = 1\n logging_write()\n return sensor_read\n\n except Exception as e:\n logging_write(e)", "def read_core_vbat(self) -> float:", "def get_now_measure():\n try:\n AD_pin = 2 # Using ADIn2 channel\n ADC.write_byte_data(0x48, (0x40 + AD_pin), AD_pin)\n ad_val = ADC.read_byte(0x48)\n return ad_val\n except AttributeError:\n return \"0\"", "def read_led(self, pin):\n value = 0 #Default to nowt\n if self.iface.connected:\n try:\n value = self.iface.get_PWM_dutycycle(pin)\n except (AttributeError, IOError, pigpio.error):\n logging.error(\" Cannot read PWM of pin #%s\" % (pin,))\n else:\n logging.error(\" Interface not connected. Cannot read PWM of pin #%s.\" % (pin,))\n return value", "def read(self):\n self.pi.write(self.gpio, pigpio.LOW)\n time.sleep(0.017) # 17 ms\n self.pi.set_mode(self.gpio, pigpio.INPUT)\n self.pi.set_watchdog(self.gpio, 200)\n time.sleep(0.2)", "def read_line(self):\n self.read_calibrated()\n\n avg = 0\n summ = 0\n online = False\n\n for i in range(0, self.NUM_SENSORS):\n val = self.sensorValues[i]\n if val > 500: online = True\n if val > 50:\n multiplier = i * 1000\n avg += val * multiplier\n summ += val\n\n if online == False:\n if self.lastValue < (self.NUM_SENSORS-1)*1000/2:\n return 0\n else:\n return (self.NUM_SENSORS-1)*1000\n\n self.lastValue = avg/summ\n return self.lastValue", "def read_actual_current(self):\n function_string = 'I' + self.output + 'O?'\n value_string = self.scpi_comm(function_string)\n time.sleep(0.1) # This might only be necessary on LAN interface\n try:\n value = float(value_string.replace('A', ''))\n except ValueError:\n value = -9998\n return value", "def read_sensor_data():\n global light_scheme_set, current_timeout\n\n # prevents very rapid changes of the color scheme\n if current_timeout is not 0:\n current_timeout -= 1\n return\n else:\n # call the shared library's sensor code\n reading = dll.readSensor()\n scheme = None\n\n # check if the scheme needs to be changed\n if reading >= settings.get('threshold') and light_scheme_set is not True:\n scheme = settings.get('light_color_scheme')\n light_scheme_set = True\n\n elif reading < settings.get('threshold') and light_scheme_set is not False:\n scheme = settings.get('dark_color_scheme')\n light_scheme_set = False\n\n # change user settings\n if scheme is not None:\n global_settings = sublime.load_settings('Preferences.sublime-settings')\n if global_settings.get('color_scheme') != scheme:\n global_settings.set('color_scheme', scheme)\n sublime.save_settings('Preferences.sublime-settings')\n 
current_timeout = settings.get('cycle_timeout')", "def read_tsl2561(self):\n try:\n # Create the I2C bus\n i2c = busio.I2C(board.SCL, board.SDA)\n # Create the TSL2561 instance, passing in the I2C bus\n tsl = adafruit_tsl2561.TSL2561(i2c)\n # Print chip info\n print(\"Chip ID = {}\".format(tsl.chip_id))\n print(\"Enabled = {}\".format(tsl.enabled))\n print(\"Gain = {}\".format(tsl.gain))\n print(\"Integration time = {}\".format(tsl.integration_time))\n print(\"Configuring TSL2561...\")\n print(\"Configuring TSL2561...\")\n # Enable the light sensor\n tsl.enabled = True\n time.sleep(1)\n # Set gain 0=1x, 1=16x\n tsl.gain = 0\n # Set integration time (0=13.7ms, 1=101ms, 2=402ms, or 3=manual)\n tsl.integration_time = 1\n # print(\"Getting readings...\")\n print(\"Getting readings....\")\n # Get raw (luminosity) readings individually\n broadband = tsl.broadband\n infrared = tsl.infrared\n # Get raw (luminosity) readings using tuple unpacking\n # broadband, infrared = tsl.luminosity\n # Get computed lux value (tsl.lux can return None or a float)\n lux = tsl.lux\n # Print results\n # print(\"Enabled = {}\".format(tsl.enabled))\n print(\"Enabled = {}\".format(tsl.enabled))\n # print(\"Gain = {}\".format(tsl.gain))\n print(\"Gain = {}\".format(tsl.gain))\n # print(\"Integration time = {}\".format(tsl.integration_time))\n print(\"Integration time = {}\".format(tsl.integration_time))\n # print(\"Broadband = {}\".format(broadband))\n print(\"Broadband = {}\".format(broadband))\n # print(\"Infrared = {}\".format(infrared))\n print(\"Infrared = {}\".format(infrared))\n # if lux is not None:\n # print(\"Lux = {}\".format(lux))\n # else:\n # print(\"Lux value is None. Possible \\\n # sensor underrange or overrange.\")\n # Disble the light sensor (to save power)\n tsl.enabled = False\n print('read light data: ')\n print(lux)\n print(infrared)\n print(broadband)\n return lux, infrared, broadband\n except BaseException as e:\n print('An exception occurred: {}'.format(e))", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) 
* self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023", "def get_actuator_status(message, arduino):\n as_timer_reset = 0\n\n cpt = 5\n for actuator_name in arduino['actuator_status'] :\n\n if arduino['as_timer'] >= 5 :\n message_console = \"5s elapsed since last send\"\n as_timer_reset = 1\n send_AStoDataBase(arduino['id'], actuator_name, arduino['actuator_status'][actuator_name])#Fonction Corentin\n\n if message[cpt] == \"T\" and arduino['actuator_status'][actuator_name] != \"OK\":\n arduino['actuator_status'][actuator_name] = \"OK\"\n message_console = \"CHANGE\"\n as_timer_reset = 1\n send_AStoDataBase(arduino['id'], actuator_name, arduino['actuator_status'][actuator_name])#Fonction Corentin\n\n cpt+=1\n\n if as_timer_reset == 1 :\n print(\"Mechanism %s : actuator status : %s --- database update ---\" %(arduino['id'], message_console))\n arduino['as_timer'] = 0\n as_timer_thread = ASTimer(arduino)\n as_timer_thread.start()", "def determine_intensity_single_channel(pi, pin_light, i2c_multiplexer_handle, i2c_sensor_handle, channel_number):\n pi.write(pin_light, 1)\n i2c_multiplexer_select_channel(pi,\n i2c_multiplexer_handle, channel_number)\n intensity = i2c_sensor_handle.ch0_light\n timepoint = time.time()\n time.sleep(0.25)\n pi.write(pin_light, 0)\n return timepoint, intensity", "def IR_sensor(self):\n self.serial.reset_input_buffer() # clear buffer\n self.send(b\"kk\\n\")\n # next line depends on read timeout\n result = self.serial.read(1)\n if result == b'':\n print(\"no IR data returned\")\n return 2 # if 2 returned do it again\n else:\n result = int.from_bytes(result, \"big\")\n return result", "def hp3458a_read_voltage(hp_meter):\n hp_meter.write(\"TARM SGL\")\n return float(hp_meter.read())", "def sensorInfo(timeDelay = .5):\n cycle = time.time()\n while (time.time() - cycle < timeDelay):\n pass\n return random.randint(1,10000)", "def read(self, P=1013000, S=35000):\n # get time at start of measurement\n tnow = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())\n\n # measure Temp\n self.sensor.write('TMP 1\\r')\n self.sensor.readline()\n\n # adjust environment parameters for O2 measurement\n envpar = 'ENV 1 -300000 {:.0f} {:.0f} \\r'.format(P, S)\n self.sensor.write(envpar)\n # Notes:\n # T: -300000 uses last temperature measurement\n # P: Ambient pressure in ubar (1000000 = 1 bar)\n # S: Salinity in mg/L (1000 = 1 g/L)\n self.sensor.readline()\n\n # measure O2\n self.sensor.write('MSR 1\\r')\n self.sensor.readline()\n\n # read all results\n self.sensor.write('RAL 1\\r')\n res = self.sensor.readline()\n\n # format data\n res = res.replace('RAL 1 ', '').rstrip()\n res = [int(r) for r in res.split(' ')]\n\n self.last_read = [tnow] + res\n return [tnow] + res", "def read(SCK=15, DAT=13):\n\n data = Pin(DAT, Pin.IN)\n sck = Pin(SCK, Pin.OUT, value=0)\n \n initialFreq = freq() # so we can slow it down afterwards\n freq(160000000) # hx711 needs a high clock frequency :o\n \n value = 0\n \n if data.value() == 0:\n\n for i in range(24):\n sck.value(1)\n sck.value(0)\n value = value << 1 | data.value()\n\n for j in range(1):\n sck.value(1)\n sck.value(0)\n\n # convert 2's complement to integer\n if value & (1 << (24 - 1)):\n value -= 1 << 24\n\n freq(initialFreq) # back to initialFreq\n \n return value", "def READ_PRESSURE_SENSOR():\n return 15.246", "def readLevelTimerValue(self):\n if self.level_timer_address == None:\n raise RuntimeError(\"The timer address hasn't been acquired\")\n\n frames_elapsed = self.readUInt32(self.level_timer_address)\n return frames_elapsed", "def 
getLightSensor() -> int:\n pass", "def read_lumi_counter(device):\n return read(device, \"gt_mp7_frame.rb.tcm_status.luminosity_seg_nr\")", "def pulse_lo(pin, length=0.00001):\n off(pin)\n time.sleep(length)\n on(pin)\n time.sleep(length)", "def test_str_time_2(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"time_2\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0xFB,\n 0x29,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), -405995.52)\n self.assertEqual(sensor.unit_of_measurement(), \"ms\")\n self.assertEqual(sensor.ha_device_class(), None)", "def read(self):\n try:\n cmd = 'SAMP:COUN 1' \n self.handle.write(cmd) #one sample per trigger\n self.handle.write('TRIG:SOUR BUS') #triggered by command\n self.handle.write('TRIG:COUN 1') #one trigger to return to wait for trg\n self.handle.write('INIT:IMM') #DVM to \"wait for trigger\" \n self.handle.write('*TRG')\n startTime = time.time()\n while True: #wait until measuring flag goes to 0\n try:\n measured = self.handle.ask(\"DATA:POIN?\")\n measured = measured.strip() #remove CR \n measured = int(measured) #convert to number\n if measured == 1: #final number of samples achieved\n break;\n except Exception:\n print('Dvm34411:read() polling failed !')\n raise\n \n if time.time() - startTime > self.timeout:\n print('Dvm34411:read() timeout !')\n return False\n \n time.sleep(1) \n reading = self.handle.ask('R? 1;') #definite-Length block format\n except Exception:\n print('Dvm34411.read() failed !')\n raise\n if reading[0] != '#':\n print('Dvm34411.read() DLB format error - # expected !')\n return False\n digits = int(reading[1])\n reading = reading[2 + digits:]\n rdg = float(reading)\n return rdg", "def _read(self, pin):\n # Immediately return conversion register result if in CONTINUOUS mode\n # and pin has not changed\n if self.mode == Mode.CONTINUOUS and self._last_pin_read == pin:\n raw_adc = self._read_register(_ADS1X15_POINTER_CONVERSION, True)\n raw_adc = raw_adc.to_bytes(2, \"big\")\n return struct.unpack(\">h\", raw_adc)[0] >> self._shift_fact\n\n # Assign last pin read if in SINGLE mode or first sample in CONTINUOUS\n # mode on this pin\n self._last_pin_read = pin\n\n # Configure ADC every time before a conversion in SINGLE mode\n # or changing channels in CONTINUOUS mode\n config = _ADS1X15_CONFIG_OS_SINGLE if self.mode == Mode.SINGLE else 0\n config |= (pin & 0x07) << _ADS1X15_CONFIG_MUX_OFFSET\n config |= _ADS1X15_CONFIG_GAIN[self.gain]\n config |= self.mode\n config |= self.rate_config[self.data_rate]\n config |= _ADS1X15_CONFIG_COMP_QUE_DISABLE\n self._write_register(_ADS1X15_POINTER_CONFIG, config)\n\n # Wait for conversion to complete\n # ADS1x1x devices settle within a single conversion cycle\n if self.mode == Mode.SINGLE:\n # Continuously poll conversion complete status bit\n #while not self._conversion_complete():\n while not self._read_register(_ADS1X15_POINTER_CONFIG) & 0x8000:\n pass\n else:\n # Can't poll registers in CONTINUOUS mode\n # Wait expected time for two conversions to complete\n time.sleep(2 /self.data_rate)\n\n raw_adc = self._read_register(_ADS1X15_POINTER_CONVERSION, False)\n raw_adc = raw_adc.to_bytes(2, \"big\")\n return struct.unpack(\">h\", raw_adc)[0] >> self._shift_fact", "def hp34401a_read_voltage(hp_meter):\n hp_meter.write(\"MEAS:VOLT:DC? DEF,DEF\")\n return float(hp_meter.read())" ]
[ "0.59235793", "0.59106934", "0.5670082", "0.5641416", "0.5617479", "0.56003934", "0.5563454", "0.5529551", "0.5488265", "0.5479008", "0.5411189", "0.53775793", "0.5370738", "0.53605217", "0.53589904", "0.5346288", "0.5344704", "0.5330145", "0.53069305", "0.52879274", "0.52723193", "0.5258323", "0.52527213", "0.52388084", "0.5230216", "0.52169794", "0.5213429", "0.52127934", "0.5193907", "0.51912457" ]
0.6330764
0
Delete URI and return the number of bytes deleted.
def delete_uri(
    self, uri: str, logger: Optional[logging.Logger] = default_logger
) -> int:
    local_dir = get_local_dir_from_uri(uri, self._resources_dir)
    local_dir_size = get_directory_size_bytes(local_dir)
    deleted = delete_package(uri, self._resources_dir)
    if not deleted:
        logger.warning(f"Tried to delete nonexistent URI: {uri}.")
        return 0
    return local_dir_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, _uri):\n print(\"Deleting '%s'\"%(_uri))\n response = self.__httpsRequest('DELETE', _uri, '')", "def count_deleted_bytes(self): # FileObj.count_deleted_bytes\n if self.deleted:\n return self.bytes \n else:\n return 0", "def delete(cls, uri):\n return cls._perform_request(uri, 'DELETE')", "def count_deleted_bytes(self): # EntryList.count_deleted_bytes\n bytes=0\n for name, e in self.contents.iteritems():\n bytes = bytes + e.count_deleted_bytes()\n return bytes", "def _delete(self, uri, headers=None):\n if self.openam_url[-1:] == '/':\n openam_path = self.openam_url + uri\n else:\n openam_path = self.openam_url + \"/\" + uri\n\n try:\n data = requests.delete(openam_path, headers=headers, timeout=self.timeout, verify=self.verify)\n except requests.exceptions.RequestException as e:\n data = {'error': e}\n return data", "def count_deleted_bytes(self): # DirObj.count_deleted_bytes\n bytes=0\n for name, d in self.subdirs.iteritems():\n bytes = bytes + d.count_deleted_bytes()\n for name, f in self.files.iteritems():\n if f.deleted:\n bytes = bytes + f.count_deleted_bytes()\n return bytes", "def delete(self, uri, **kwargs):\n return self.session.delete(uri, **kwargs)", "def count_deleted(self): # FileObj.count_deleted\n if self.deleted:\n return 1\n else:\n return 0", "def delete(self, uri, body=None, headers=None, auth=False):\n return self.send_request('DELETE', uri, body, headers, auth)", "def count_deleted(self):\n count = 0\n for _, e in self.contents.items():\n count = count + e.count_deleted()\n return count", "def delete(self):\r\n request = http.Request('DELETE', self.get_url())\r\n\r\n return request, parsers.parse_empty", "async def delete(self, delete: TPayload) -> None:", "def Delete(self, path):\n\n # try to request\n try:\n request = requests.delete(self.config[\"url\"] + \"/weaviate/v1\" + path)\n except urllib.error.HTTPError as error:\n return None\n\n return request.status_code", "def delete(self, *args, **kw):\n kw['method'] = 'DELETE'\n return self.open(*args, **kw)", "def do_delete_request(self, uri, headers, timeout_ms):\n return self._do_request('DELETE', uri, headers, None, timeout_ms, None)", "def count_bytes(self, deleted=False):\n b = 0\n for _, e in self.contents.items():\n b = b + e.count_bytes(deleted)\n return b", "def delete(self, uri, where, selectionArgs):\n pass", "def delete(self, url):\n return self._query(url, 'DELETE')", "async def _delete(self, key):\n return 1 if await self.client.delete(key) else 0", "def count_deleted(self): # EntryList.count_deleted\n count=0\n for name, e in self.contents.iteritems():\n count = count + e.count_deleted()\n return count", "def unmanaged_delete(task_id, url):\n\n PoolManager.db.query('DELETE FROM `unmanaged_deletions` WHERE `id` = %s', task_id)\n\n try:\n stat_result = gfal_exec('stat', (url,), return_value = True)\n except:\n return 0, None, None, 'stat error', ''\n\n if stat.S_ISDIR(stat_result.st_mode):\n # this is a directory\n result = gfal_exec('rmdir', (url,))\n else:\n result = gfal_exec('unlink', (url,))\n\n return (0,) + rmdir_result[1:]", "def verify_delete(url, header):\n test_result_flag = False\n\n getmsg = http.get(url, header)\n if getmsg.status_code == 404:\n test_result_flag = True\n else:\n print('GET after DELETE failed')\n print('URL')\n print(url)\n print('headers')\n print(header)\n print('Response Body')\n print(getmsg.text)\n print('GET Code {}'.format(getmsg.status_code))\n\n return test_result_flag", "def delete(self, *args, **kwargs):\n return 0", "def delete(self, name):\n 
self.connect()\n self._write('DEL %s\\r\\n' % name)\n return self._get_numeric_response()", "def delete_dir(url_prefix, rse):\n try:\n endpoint, bucket_name, key_name = _get_endpoint_bucket_key(url_prefix)\n bucket = _get_bucket(rse, endpoint, bucket_name)\n i = 0\n keys = []\n for key in bucket.list(prefix=key_name):\n keys.append(key.name)\n i += 1\n if i == 1000:\n ret = _delete_keys(bucket, keys)\n for ret_key in ret:\n if ret[ret_key]['status'] != 0:\n return ret[ret_key]['status'], ret[ret_key]['output']\n i = 0\n keys = []\n if len(keys):\n ret = _delete_keys(bucket, keys)\n for ret_key in ret:\n if ret[ret_key]['status'] != 0:\n return ret[ret_key]['status'], ret[ret_key]['output']\n return 0, None\n except:\n return -1, \"Failed to delete dir: %s, error: %s\" % (url_prefix, traceback.format_exc())", "def delete(self, url):\n return self.request(url, \"DELETE\")", "def _delete(self, url):\n return self._request(url, method=\"DELETE\")", "def http_delete_and_get_check(url):\n delete_resp_obj = RestClientApis.http_delete_and_check_success(url)\n json_body = delete_resp_obj.json_body\n response_object = delete_resp_obj.response_object\n if delete_resp_obj.success:\n # if delete was successful\n get_resp_obj = RestClientApis.http_get_and_check_success(url)\n response_object = get_resp_obj.response_object\n json_body = get_resp_obj.json_body\n if get_resp_obj.http_status == HTTPStatus.NOT_FOUND:\n # if resource was not found we are good\n success = True\n return_code = HTTPStatus.OK\n message = HTTPStatus.OK.phrase\n else:\n success = False\n return_code = HTTPStatus.INTERNAL_SERVER_ERROR\n message = HTTPStatus.INTERNAL_SERVER_ERROR.phrase\n else:\n success = False\n return_code = delete_resp_obj.http_status\n message = delete_resp_obj.message\n rest_return_obj = RestReturn(success=success, message=message, http_status=return_code,\n json_body=json_body, response_object=response_object)\n return rest_return_obj", "def delete(self,filename):\n\n try:\n self.ftp.delete(filename)\n except:\n print('Error deleting remote file:%s'%filename)\n return 1\n\n return 0", "def delete():" ]
[ "0.67140555", "0.65919757", "0.65536815", "0.6445002", "0.60951376", "0.6051026", "0.60461366", "0.5974676", "0.59423614", "0.58661956", "0.5857979", "0.58498526", "0.58419967", "0.58417064", "0.5830656", "0.58267665", "0.5780797", "0.57706386", "0.5765554", "0.5755444", "0.5740302", "0.5722716", "0.5711911", "0.56735986", "0.5650223", "0.56405574", "0.56371707", "0.56285644", "0.55988556", "0.5589015" ]
0.74127245
0
Download a jar URI.
async def _download_jars(
    self, uri: str, logger: Optional[logging.Logger] = default_logger
):
    try:
        jar_file = await download_and_unpack_package(
            uri, self._resources_dir, self._gcs_aio_client, logger=logger
        )
    except Exception as e:
        raise RuntimeEnvSetupError(
            "Failed to download jar file: {}".format(e)
        ) from e
    module_dir = self._get_local_dir_from_uri(uri)
    logger.debug(f"Succeeded to download jar file {jar_file} .")
    return module_dir
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _download_from_url(self, url):\n target_file_name = self.dir + \"/\" + url.split('/')[-1].split('?')[0]\n urllib.urlretrieve (url, target_file_name)", "def _download(url, outpath=None, dirname=None, branch='master', release=None):\n six.print_('downloading...')\n outfolder = outpath or os.getcwd()\n file, archive_url = get_archive_url(url, branch, release)\n six.print_(archive_url)\n if dirname:\n outfolder = \"{}/{}.zip\".format(outfolder, dirname)\n return file, wget.download(archive_url, out=outfolder)", "def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)", "def download_addon(self, url, target_path):\n try:\n filename = url.split('?')[0].rstrip('/').rsplit('/', 1)[-1]\n target_path = os.path.join(target_path, filename)\n\n print \"Downloading %s to %s\" % (url, target_path)\n urllib.urlretrieve(url, target_path)\n\n return target_path\n except Exception, e:\n print e", "def download_url(filename, url):\n latest_package_url = request.urlopen(url).read().decode(\"utf-8\")\n print(\"Downloading latest package:\\n{}\".format(latest_package_url))\n request.urlretrieve(latest_package_url, filename, reporthook=download_progress_callback)", "def download_url(url, fd, handle=None):\n return _librepo.download_url(handle, url, fd)", "def download_file(download_url, save_path):\n url = \"https://www.encodeproject.org/\" + download_url\n urllib.request.urlretrieve(url, save_path)", "def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)", "def download(self, download_path):\n return", "def download_url(url, path=None, name=None):\n r = requests.get(url, allow_redirects=True)\n if path:\n paths = []\n paths.append(path)\n make_dir_from_list(paths)\n open(os.path.join(paths[0], name), 'wb').write(r.content)\n return r.content.decode('utf-8')", "def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))", "def download(url, filename=None):\n\t# requirements os, shutil, urllib.parse, urllib.request\n\tif not filename:\n\t\turl_parts = urllib.parse.urlparse(url)\n\t\tfilename = os.path.basename(url_parts.path)\n\turl_h = urllib.request.urlopen(url)\n\twith open(filename, 'wb') as file_h:\n\t\tshutil.copyfileobj(url_h, file_h)\n\turl_h.close()\n\treturn", "def download(self, url):\n try:\n webFile = urllib.urlopen(url)\n localFile = open(self.workdir + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n print(\"could not get url \" + url)", "def __download(self):\n\n # Use the default repository if set to True\n if self.repository is True:\n self.repository = self.__default_repository\n\n if not self.repository and not self.url:\n tarball = 'ucx-{}.tar.gz'.format(self.__version)\n self.url = '{0}/v{1}/{2}'.format(self.__baseurl, self.__version,\n tarball)", "def download(self, url):\n url = URL(url)\n downloader = getattr(self, 'download_%s' % 
url.scheme, None)\n if downloader is None:\n msg = \"We haven't implemented the '%s' protocol yet.\" % url.scheme\n raise NotImplementedError(msg)\n fp = None\n else:\n fp = downloader(url)\n return fp", "def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))", "def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))", "def _Download(url):\n response = urllib2.urlopen(url)\n if response.code != 200:\n raise RuntimeError('Failed to download \"%s\".' % url)\n return response.read()", "def torrent_download(download_url, torrent):\n webFile = urllib.urlopen(download_url)\n localFile = open(torrent, 'wb')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()", "def _download(self, link):\n # Based on pip 1.4.1's URLOpener but with cert verification removed\n def opener(is_https):\n if is_https:\n opener = build_opener(HTTPSHandler())\n # Strip out HTTPHandler to prevent MITM spoof:\n for handler in opener.handlers:\n if isinstance(handler, HTTPHandler):\n opener.handlers.remove(handler)\n else:\n opener = build_opener()\n return opener\n\n # Descended from unpack_http_url() in pip 1.4.1\n def best_filename(link, response):\n \"\"\"Return the most informative possible filename for a download,\n ideally with a proper extension.\n\n \"\"\"\n content_type = response.info().get('content-type', '')\n filename = link.filename # fallback\n # Have a look at the Content-Disposition header for a better guess:\n content_disposition = response.info().get('content-disposition')\n if content_disposition:\n type, params = cgi.parse_header(content_disposition)\n # We use ``or`` here because we don't want to use an \"empty\" value\n # from the filename param:\n filename = params.get('filename') or filename\n ext = splitext(filename)[1]\n if not ext:\n ext = mimetypes.guess_extension(content_type)\n if ext:\n filename += ext\n if not ext and link.url != response.geturl():\n ext = splitext(response.geturl())[1]\n if ext:\n filename += ext\n return filename\n\n # Descended from _download_url() in pip 1.4.1\n def pipe_to_file(response, path):\n \"\"\"Pull the data off an HTTP response, and shove it in a new file.\"\"\"\n # TODO: Indicate progress.\n with open(path, 'wb') as file:\n while True:\n chunk = response.read(4096)\n if not chunk:\n break\n file.write(chunk)\n\n url = link.url.split('#', 1)[0]\n try:\n response = opener(urlparse(url).scheme != 'http').open(url)\n except (HTTPError, IOError) as exc:\n raise DownloadError(link, exc)\n filename = best_filename(link, response)\n pipe_to_file(response, join(self._temp_path, filename))\n return filename", "def _download_from_url(self) -> bytes:\n response = requests.get(self.url, allow_redirects=True)\n return response.content", "def download(url, target):\n # Add progress bar via:\n # http://stackoverflow.com/a/22776/317916\n if not url:\n return None\n urlretrieve(url, target)\n return target", "def download(url, out_folder):\n \n filename = \"2.png\"\n \n outpath = os.path.join(out_folder, filename)\n \n if url.lower().startswith(\"http\"):\n urlretrieve(url, outpath)\n else:\n urlretrieve(urlparse.urlunparse(parsed), outpath)", "def download(self, url):\n try:\n logging.info(self.log_format((\"downloading \" + url)))\n webFile = urllib.urlopen(url)\n localFile = open(self.paths['workspace'] + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n 
webFile.close()\n localFile.close()\n except IOError:\n logging.error(self.log_format((\"could not get url \" + url)))", "def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None", "def download_file(url, target_pkg_dir, filename):\n abs_file_path = \"/\".join([target_pkg_dir, filename])\n try:\n urllib.request.urlretrieve(url, abs_file_path)\n except Exception as ex:\n raise Exception(\"HTTP error for url: {url}\\nError message: {msg}\\nHTTP code: {code}\".format(\n url=ex.url, msg=ex.msg, code=ex.code))", "def download_zip(self, path: Path) -> Path:\n if not self.url:\n raise ValueError(\"Release must have a valid url to download the zip.\")\n\n with requests.get(self.url, stream=True) as response:\n with open(path, \"wb\") as download_file:\n shutil.copyfileobj(response.raw, download_file)\n\n return path", "def download(self, url: str, dest: PathLike, force: bool = False):", "def download_from_url(url, output_path):\n\n print('Pulling data from {} to {}'.format(url, output_path))\n wget.download(url, output_path)\n print('done')", "def _download_archive(self):\n _logger.debug('Downloading archive...')\n response = urlopen(self.url)\n\n with open(self._archive_full_path, 'wb') as archive_file:\n chunk_size = 1024 * 1024 # 1 MB\n chunk = response.read(chunk_size)\n\n while chunk:\n archive_file.write(chunk)\n chunk = response.read(chunk_size)\n\n _logger.debug('Archive {name} has been successfully downloaded.'.format(name=self.archive_name))" ]
[ "0.6878704", "0.6823005", "0.6765299", "0.67443496", "0.672472", "0.65095174", "0.64747316", "0.6447837", "0.63558346", "0.62665766", "0.62546325", "0.62373096", "0.6230308", "0.6222487", "0.62218195", "0.6216078", "0.6216078", "0.62140566", "0.6175067", "0.6155264", "0.61466146", "0.61437327", "0.61117184", "0.60770744", "0.6073118", "0.60377353", "0.6025895", "0.5996958", "0.5980831", "0.59766674" ]
0.69873255
0
Implements a polynomial learning rate of the form (1/n^w)
def polynomial_learning_rate(n, w=0.5):
    assert n > 0, "Make sure the number of times a state action pair has been observed is always greater than 0 before calling polynomial_learning_rate"
    return 1./n**w
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def learning_rate(self, t):\n # return self.init_learning_rate * (1 - t)\n return self.init_learning_rate / (1 + t)\n # return self.init_learning_rate * exp(-t)\n # return self.init_learning_rate * (.005 / self.init_learning_rate) ** t", "def exponentialLearningRate(base):\n def function(t):\n return base ** (t-1)\n return function", "def __learning_rate(self, lr0, epoch):\n \n \"\"\"\n Dan's Methos\n \"\"\"\n lrs = lr0 * 0.001\n c = np.power((lrs/lr0), 1.0/self.__maxEpoch)\n \n return lr0*np.power(c, epoch)", "def polyFunction(x,weights):\n y=0\n for i in range (0,len(weights)):\n y+= weights[i]*(x**i)\n return y", "def get_learning_rate():\n return 0.00001", "def poly_adjust_learning_rate(optimizer, lr0, step, n_step):\n lr = lr0 * (1.0 - step*1.0/n_step)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def update_weights(self):\n\n\n self.w += self.learn_rate * (self.X.T.dot(self.T - self.Y)\n - self.reg_L1 * np.sign(self.w)\n - self.reg_L2 * 2*self.w)", "def _decay_rate_pow(i: int, exponent: float = 0.8) -> float:\n t = jnp.array(i, jnp.float32) + 1.0\n return 1.0 - t**(-exponent)", "def fv(rate, n_years):\n return pow(1 + rate, n_years)", "def learning_rate(epoch):\n self.lr = self.lr / 1.00000001\n return self.lr", "def exp_incr_lr():\n maxlr_div_minlr = tf.divide(max_lr, min_lr)\n power_iter = tf.divide(global_step, num_iters)\n pow_div = tf.pow(maxlr_div_minlr, power_iter)\n return tf.multiply(min_lr, pow_div, name=name)", "def test_linear_in_rate(self):\n # reproducible arbitrariness\n np.random.seed(4901)\n\n alpha = 1.2\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_step = np.random.randn(self.Ns)\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(self.dt)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.rule.rate *= alpha\n sim.run(self.dt)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, alpha*change1))", "def test_linear_in_rate(self):\n # reproducible arbitrariness\n np.random.seed(4901)\n\n alpha = 1.2\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_step = np.random.randn(self.Ns)\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(self.dt)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.rule.rate *= alpha\n sim.run(self.dt)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, alpha*change1))", "def constantLearningRate(rate):\n def function(t):\n return rate\n return function", "def objective(self,w):\n diffs = self.get_y_times_diffs(self.get_split_weights(w))\n #print diffs, sigmoid(diffs)\n obj = -np.sum(np.log(sigmoid(diffs))) #negative, since minimising\n # regularisation\n obj += 0.5 * self.alpha * np.dot(w[:self.interp_index[0]], w[:self.interp_index[0]])\n return obj", "def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . 
w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l", "def learning_rate(epoch):\n return alpha / (1 + decay_rate * epoch)", "def learning_rate(epoch):\n return alpha / (1 + decay_rate * epoch)", "def weighting(wb, m, a):\n s = control.tf([1, 0], [1])\n return (s/m + wb) / (s + wb*a)", "def updateLearnRate(\n self, phi, phi_prime, eligibility_trace, discount_factor, nnz, terminal\n ):\n\n if self.learn_rate_decay_mode == \"dabney\":\n # We only update learn_rate if this step is non-terminal; else phi_prime becomes\n # zero and the dot product below becomes very large, creating a very\n # small learn_rate\n if not terminal:\n # Automatic learning rate: [Dabney W. 2012]\n # http://people.cs.umass.edu/~wdabney/papers/alphaBounds.pdf\n candid_learn_rate = np.dot(\n discount_factor * phi_prime - phi, eligibility_trace\n )\n if candid_learn_rate < 0:\n self.learn_rate = np.minimum(\n self.learn_rate, -1.0 / candid_learn_rate\n )\n elif self.learn_rate_decay_mode == \"boyan\":\n self.learn_rate = (\n self.initial_learn_rate\n * (self.boyan_N0 + 1.0)\n / (self.boyan_N0 + (self.episode_count + 1) ** 1.1)\n )\n # divide by l1 of the features; note that this method is only called if phi != 0\n self.learn_rate /= np.sum(np.abs(phi))\n elif self.learn_rate_decay_mode == \"boyan_const\":\n # New little change from not having +1 for episode count\n self.learn_rate = (\n self.initial_learn_rate\n * (self.boyan_N0 + 1.0)\n / (self.boyan_N0 + (self.episode_count + 1) ** 1.1)\n )\n elif self.learn_rate_decay_mode == \"const\":\n self.learn_rate = self.initial_learn_rate\n else:\n self.logger.warn(\"Unrecognized decay mode \")", "def lr(self):\n return .1 / self.h ** 1.5", "def adjust_learning_rate(optimizer, i_iter):\n lr = lr_poly(args.learning_rate, i_iter, args.num_steps, args.power)\n optimizer.param_groups[0]['lr'] = lr\n return lr", "def powAlpha( n ):\n return (1-betaval)*Fib(n) + Fib(n-1)\n #return Fib(n+1) - Fib(n) * betaval", "def decay_learning_rate(initial_learning_rate, i, n_iterations):\n return initial_learning_rate * np.exp(-i / n_iterations)", "def coefficient(self) -> float:\n ...", "def learning_rate_fn():\n start_learning_rate = FLAGS.start_learning_rate\n step = tf.cast(tf.compat.v1.train.get_or_create_global_step(), 'float32')\n effective_step = tf.maximum(step - FLAGS.lr_decay_after_num_steps + 1, 0)\n lr_step_ratio = tf.cast(effective_step, 'float32') / float(\n FLAGS.lr_decay_steps)\n warm_up_factor = tf.cast(tf.minimum(step / float(FLAGS.warm_up_steps), 1.),\n 'float32')\n final_learning_rate = FLAGS.gpu_learning_rate\n # Ease in to final learning rate.\n lr = ((1. 
- warm_up_factor) * start_learning_rate) + (\n warm_up_factor * final_learning_rate)\n lr = tf.cast(lr, 'float32')\n if FLAGS.lr_decay_type == 'none' or FLAGS.lr_decay_steps <= 0:\n return lr\n elif FLAGS.lr_decay_type == 'exponential':\n return lr * 0.5**lr_step_ratio\n else:\n raise ValueError('Unknown lr_decay_type', FLAGS.lr_decay_type)", "def learning_by_penalized_gradient(y, tx, w, gamma, lambda_):\n\n #on test avec Newton\n\n loss,gradient,_ = penalized_logistic_regression(y,tx,w,lambda_)\n\n w = w - gamma*gradient\n return loss, w,gradient", "def DRate_j(eta,Pap,Pec,exp_loss_jt):\n return (1 + Pap)*(1 - (1 - 2*Pec)*exp_loss_jt)", "def adjust_learning_rate(optimizer, i_iter):\n lr = lr_poly(cfg.Lr, i_iter, cfg.NUM_STEPS, cfg.POWER)\n\n #lr = lr_warm_up(Lr, i_iter, NUM_STEPS, POWER, UP, KEEP)\n for param_lr in optimizer.param_groups:\n param_lr['lr'] = lr\n return lr", "def adjust_learning_rate(optimizer, i_iter):\n lr = lr_poly(cfg.Lr, i_iter, cfg.NUM_STEPS, cfg.POWER)\n\n #lr = lr_warm_up(Lr, i_iter, NUM_STEPS, POWER, UP, KEEP)\n for param_lr in optimizer.param_groups:\n param_lr['lr'] = lr\n return lr" ]
[ "0.6654315", "0.66402304", "0.6613906", "0.6513235", "0.64861625", "0.64501995", "0.63714004", "0.6363292", "0.63441026", "0.62656987", "0.62390316", "0.62067163", "0.62067163", "0.62005514", "0.616968", "0.61533827", "0.6133514", "0.6133514", "0.6092227", "0.6049452", "0.60464483", "0.6029998", "0.6015773", "0.6004177", "0.595236", "0.59504664", "0.59453297", "0.5930754", "0.5925394", "0.5925394" ]
0.8037298
0
Query client to get the practitioner's patient list
def get_patient_list(self, client):
    self._patient_list = client.get_patient_list(self.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_patients(self):\n return", "def get_patient_list(self):\n return self._patient_list", "def get_patient_data(self, client):\n for patient in self._monitored_patients.get_patient_list():\n # print(\"Requesting data for \" + patient.first_name+\" \"+patient.last_name+\"...\")\n patient.update_data(client.get_patient_data(patient.id))", "def see_all():\n database = get_connection()\n patients_in_db = []\n patient: dict = database.patients.find()\n for p in patient:\n pat = p[\"patient_data\"]\n patients_in_db.append(pat)\n print(patients_in_db)\n return patients_in_db", "def getPatients(self):\n if not self.patients.choices:\n db = DBStorage()\n options = []\n for patient in db.all_patients():\n options.append((patient.id, '{} {}'.format(\n patient.name, patient.last_name)))\n self.patients.choices = options\n self.patients.default = 1", "def patients(request):\n try:\n doctor = Doctor.objects.get(user=request.user)\n except Doctor.DoesNotExist:\n raise Http404(\"Doctor with current user instance not found!\")\n\n #filter patients from asymptomatics\n patient_objects = doctor.patients.all()\n patients = []\n\n for patient in patient_objects:\n if patient.asymptomatic == False:\n patients.append(patient)\n\n\n #now get the doctors patient instances\n patients = get_patients_list(patients)\n\n return JsonResponse({\n \"patients\": patients\n })", "def get(self):\n all_patients = model_patient.query.all()\n return jsonify(all_patients)", "def results(self):\n q = self.cleaned_data['q'].strip()\n patients = PatientInformation.objects.filter(Q(operator__username__contains=q) | \\\n Q(patient_id__contains=q) | Q(first_name__contains=q) | Q(last_name__contains=q) | \\\n Q(email__contains=q)).distinct()\n return patients", "def search_chart_review_data(self, project, cohort, patient):\n queryset = PatientChartReview.objects.filter(\n status=PatientChartReview.StatusType.completed).order_by('patient', '-updated_on')\n\n if project is not None:\n queryset = queryset.filter(Q(project__name__icontains=project))\n if cohort is not None:\n queryset = queryset.filter(Q(cohort__name__icontains=cohort))\n if patient is not None:\n queryset = queryset.filter(patient__curation_status=Patient.StatusType.completed & Q(\n patient__patient_id__icontains=patient))\n\n queryset = queryset.distinct('patient')\n return queryset", "def get(self):\n with open_session() as session:\n try:\n records = session.query(Patient).all()\n except NoResultFound:\n logger.info(\"No record found\") # TODO: remove debugging\n return gen_response(\"No result found\")\n except Exception as error:\n logger.exception(\"Exeption: %s\" % (str(error)))\n return gen_response(\"Internal server error\")\n\n # Build the response list\n rlist = [to_dict(record) for record in records]\n return gen_response(rlist)", "def details(self):\n \n sparql_results = self.query (\"\"\"\n select distinct * where {\n\n BIND (<%s> as ?rc)\n \n ?rc olac:speaker ?participant .\n ?participant austalk:id ?pid .\n ?participant austalk:recording_site ?site .\n ?site rdfs:label ?sitelabel .\n \n ?rc austalk:prototype ?component .\n ?component austalk:shortname ?shortname .\n ?rc dc:isPartOf ?rs .\n ?rs austalk:prototype ?session .\n ?session austalk:id ?sessionid .\n \n ?component austalk:name ?name . 
\n\\\n optional { ?rc austalk:audiorating ?arating .}\n optional { ?rc austalk:videorating ?vrating .}\n optional { ?rc austalk:comment ?comment .}\n }\"\"\" % (self.identifier, ))\n \n # we expect one binding\n bindings = sparql_results[\"results\"][\"bindings\"]\n if len(bindings) == 1:\n bindings = bindings[0]\n self.participantId = bindings['pid']['value']\n self.prototype = bindings['component']['value']\n self.name = bindings['name']['value']\n self.componentId = bindings['shortname']['value']\n self.site = bindings['sitelabel']['value']\n self.sessionId = bindings['sessionid']['value']\n if bindings.has_key('arating'):\n self.audiorating = bindings['arating']['value']", "def hospital_list(self, request, **dict):\n\t\tdata = self.get_serializer(self.get_queryset(), many=True).data\n\t\treturn Response(data, status.HTTP_200_OK)", "def test_get_specific_pacient(self):\n url = '/api/v1/pacientes/{}/'.format(self.app_client.id)\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_200_OK)", "def get_queryset(self):\n username = self.request.user.username\n patient = UniquePatient.objects.filter(patient__user__username=username)\n return patient", "def get_filtered_patients(\n icd_code_list: list = cml_codes_list,\n num_months: int = NUM_MONTHS,\n occurrence_count: int = 2,\n medication_query: str = None,\n) -> pd.DataFrame:\n\n # (Optional) get the medications query\n meds_query = inQuery(\n [\n \"medication_generic_name\",\n \"order_description\",\n \"med_generic\",\n \"order_drugs\",\n \"meds_drugs\",\n \"med_generic\",\n \"med_name_description\",\n \"med_generic_name_description\",\n ],\n drugs,\n )\n codes_query = inQuery(\"diagnosis_code\", icd_code_list)\n query = andQuery(meds_query, codes_query)\n\n cohort1 = rec.makeCohort(\n cohortName=\"cohort_name_here\",\n cohortSpecifier=query,\n timeWindow=num_months,\n unit=UNIT_NAME,\n )\n\n cohort1.initDump(\n cohortProjector=[\n \"patient_id\",\n \"timestamp\",\n \"diagnosis_code\",\n \"meds_drugs\",\n \"disease\",\n ]\n )\n\n if cohort1.advanceDF():\n df = cohort1.getDF()\n # display(df)\n\n return df", "def test_showing_patient_ratings_chart_as_patient(self):\n\n result = self.client.get(\"/patient/1/ratings-chart\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"button in the sidebar\", result.data)\n\n result = self.client.get(\"/patient/4/ratings-chart\",\n follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)", "def test_dbgap_research_subjects_with_patients(caplog, dbgap_server):\n validate_research_subjects_with_patients(caplog, search_research_subjects_with_patients('phs002409'), dbgap_server)", "def search_research_subjects_with_patients(study_id):\n return ResearchSubject.where(struct={'study': f'ResearchStudy/{study_id}'}).include('individual').include('study')", "def get_talks(self):\r\n return QtSql.QSqlQuery('''SELECT * FROM presentations''')", "def get_patient_cases(patient):\n # ----- Get database connection\n db = connect_to_db()\n try:\n c1 = db.cursor()\n try:\n c1.execute(\n \"\"\"SELECT tc.SLABEL \"\"\"\n \"\"\"FROM BOM.PATIENT pt \"\"\"\n \"\"\" INNER JOIN BOM.TCASE tc ON pt.SUID = tc.SPATIENTUID \"\"\"\n \"\"\"WHERE \"\"\"\n \"\"\" pt.SID = '%s' \"\"\" %\n patient)\n res = c1.fetchall()\n cases = []\n for re in res:\n cases.append(re[0])\n finally:\n c1.close()\n finally:\n db.close()\n return cases", "def test_showing_patient_goals_as_patient(self):\n\n result = self.client.get(\"/patient/1/goals\")\n 
self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Goals\", result.data)\n\n result = self.client.get(\"/patient/4/goals\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)", "def get_vip_clinical():\n\n db = app.data.driver.db\n\n # limit access to service account only\n auth = request.authorization\n if not auth:\n return json.dumps({\"error\": \"no authorization supplied\"})\n\n accounts = db.user\n user = accounts.find_one({'token': auth.username})\n if not user:\n return json.dumps({\"error\": \"not authorized\"})\n\n query = {}\n params = request.args.get('where', None)\n if params is not None:\n query = json.loads(request.args.get('where'))\n\n if 'get_new_patients_only' in query:\n query['_created'] = {'$gte': datetime.datetime.strptime(query['data_push_id'], '%Y-%m-%d %X')}\n del query['get_new_patients_only']\n\n clinical_ll = list(db.clinical.find(query))\n for clinical in clinical_ll:\n for field, val in clinical.items():\n if not isinstance(field, float) and not isinstance(field, int):\n try:\n clinical[field] = str(val)\n except UnicodeEncodeError:\n continue\n\n return json.dumps(clinical_ll)", "def get_vip_clinical():\n\n db = app.data.driver.db\n\n # limit access to service account only\n auth = request.authorization\n if not auth:\n return json.dumps({\"error\": \"no authorization supplied\"})\n\n accounts = db.user\n user = accounts.find_one({'token': auth.username})\n if not user:\n return json.dumps({\"error\": \"not authorized\"})\n\n query = {}\n params = request.args.get('where', None)\n if params is not None:\n query = json.loads(request.args.get('where'))\n\n if 'get_new_patients_only' in query:\n query['_created'] = {'$gte': datetime.datetime.strptime(query['data_push_id'], '%Y-%m-%d %X')}\n del query['get_new_patients_only']\n\n clinical_ll = list(db.clinical.find(query))\n for clinical in clinical_ll:\n for field, val in clinical.items():\n if not isinstance(field, float) and not isinstance(field, int):\n try:\n clinical[field] = str(val)\n except UnicodeEncodeError:\n continue\n\n return json.dumps(clinical_ll)", "def show_all_records(request, patient_id):\n if (request.user.patient_username.id != patient_id):\n Logs.objects.create(type='READ', user_id=request.user.uid, interface='PATIENT', status=STATUS_ERROR, details='[Show All Records] Logged in user does not match ID in URL. 
URL ID: ' + str(patient_id))\n return redirect('/patient/login/')\n\n patient = patient_does_not_exists(patient_id)\n\n # Get all records from Readings, TimeSeries, Documents, Images, and Videos\n readings = Readings.objects.filter(patient_id=patient)\n timeseries = TimeSeries.objects.filter(patient_id=patient)\n documents = Documents.objects.filter(patient_id=patient).exclude(type='Healthcare Professional Note')\n images = Images.objects.filter(patient_id=patient)\n videos = Videos.objects.filter(patient_id=patient)\n\n results = list(chain(readings, timeseries, documents, images, videos))\n\n Logs.objects.create(type='READ', user_id=patient.username.uid, interface='PATIENT', status=STATUS_OK, details='Show All Records')\n\n context = {\n 'patient': patient,\n 'results': results\n }\n\n return render(request, 'show_all_records.html', context)", "def patient_profile():\n patient_email = request.args.get('pick_patient')\n\n # Fetching the APIs\n response_get_patient_details = requests.post(server_url + 'doctor/get_patient_details', json={\n 'patient_email': patient_email\n })\n response_get_records_for_patient = requests.post(server_url + 'doctor/get_all_medical_records_for_patient', json={\n 'patient_email': patient_email\n })\n patient_details = response_get_patient_details.json()\n medical_records = response_get_records_for_patient.json()\n\n return render_template('doctors/patient_profile.html', patient_details=patient_details,\n medical_records=medical_records)", "def recovered(request):\n try:\n doctor = Doctor.objects.get(user=request.user)\n except Doctor.DoesNotExist:\n raise Http404(\"Doctor with current user instance not found!\")\n\n #first ge the doctors patients\n patients = doctor.patients.all()\n\n #filter those that are asymptomatic to be in recovered\n recovered = []\n for patient in patients:\n if patient.asymptomatic == True:\n recovered.append(patient)\n else:\n pass \n\n return JsonResponse({\n \"recovered\": get_patients_list(recovered)\n })", "def get_queryset(self):\n judge_qs = Judge.objects.filter(judge=self.request.user)\n return Contest.objects.filter(\n pk__in=judge_qs.values('contest'),\n publish_date__gte=timezone.now(),\n )", "def get(self):\n args = self.get_parser.parse_args(strict=True)\n\n with open_session() as session:\n try:\n records = session.query(ECG) \\\n .filter(ECG.patient_id == args.patient_id) \\\n .all()\n except NoResultFound:\n logger.info(\"No record found\")\n resp = gen_response(\"No result found\")\n resp.status_code = 404\n return resp\n except Exception as error:\n logger.exception(\"Exeption: %s\" % (str(error)))\n return gen_response(\"Internal server error\")\n\n # Build the response list\n print(records)\n rlist = [to_dict(record) for record in records]\n return gen_response(rlist)", "def get_patient_search(form, csv=False):\n # Get cursor.\n conn = engine.raw_connection()\n\n # Get form data for non-date fields.\n date_fields = [\"MostRecentTestDateLo\", \"MostRecentTestDateHi\"]\n args = form.data\n\n if args[\"PersonType\"] == \"Patient\":\n data = {k: v for (k, v) in args.items() if not is_empty_form_value(v) and k not in date_fields + [\"EmplCampus\", \"EmplWorkLocation\"]}\n elif args[\"PersonType\"] == \"Employee\":\n data = {k: v for (k, v) in args.items() if not is_empty_form_value(v) and k not in date_fields + [\"InstitutionName\", \"WardLocationName\"]}\n else:\n data = {k: v for (k, v) in args.items() if not is_empty_form_value(v) and k not in date_fields + [\"InstitutionName\", \"WardLocationName\", \"EmplCampus\", 
\"EmplWorkLocation\"]}\n \n # Construct the where clause and params for non-date fields.\n ands = []\n if \"AdmitStatus\" in data.keys() and data[\"AdmitStatus\"] == \"Not Currently Admitted (Never Admitted, Discharged, or Unknown)\":\n ands.append(\"(AdmitStatus in ('never admitted', 'discharged', 'unknown'))\")\n del data[\"AdmitStatus\"]\n for k, _ in data.items():\n ands.append(\"([{}] = ?)\".format(k))\n if len(ands) > 0:\n where = \"and \" + (\" and \".join(ands))\n else:\n where = \"\" \n params = [v for _, v in data.items()]\n\n # Construct the where clause and parameters for date fields.\n date_ands = []\n date_params = []\n \n if not is_empty_form_value(form.MostRecentTestDateLo.data): \n date_ands.append(\"convert(date, MostRecentTestDateTime) >= ? \")\n date_params.append(form.MostRecentTestDateLo.data) \n if not is_empty_form_value(form.MostRecentTestDateHi.data):\n date_ands.append(\"convert(date, MostRecentTestDateTime) <= ? \")\n date_params.append(form.MostRecentTestDateHi.data)\n if len(date_ands) > 0:\n date_where = \"and \" + (\" and \".join(date_ands))\n else:\n date_where = \"\"\n\n # If called for patient-search-display, a reduced set of columns is returned to achieve better performance;\n # Otherwise, for patient-search-csv, all columns are returned.\n display_columns = [\"SSN4\", \"LastName\", \"FirstName\", \"Sta3n,\" \"MostRecentTestDateTime\", \"MostRecentTestResult\", \"PersonType\", \"InstitutionName\", \"WardLocationName\",\n \"EmplCampus\", \"EmplWorkLocation\", \"EmplEmployeeRole\", \"AdmitStatus\", \"PACT_Provider\", \"Contacted\", \"Tracked\", \"PersonID\"]\n if csv:\n select_columns = \"*\"\n else:\n select_columns = \", \".join(display_columns)\n \n # Execute the main query.\n q = (\"select \" + select_columns + \n \" from {schema}.PersonCombined \".format(schema=app_schema) +\n \"where (MostRecentTestResult is not null) \" +\n where + date_where +\n \" order by MostRecentTestDateTime desc\")\n print(q, params + date_params, file = sys.stderr)\n tb = pd.read_sql(q, conn, params=params + date_params)\n \n tb[\"MostRecentTestDateTime\"] = [correct_nats(x) for x in tb[\"MostRecentTestDateTime\"]]\n tb[\"Sta3n\"] = [correct_nans(y) for y in tb[\"Sta3n\"]]\n\n return tb", "def filter_by_participant (self, participant):\n \n sparql_results = self.query (\"\"\"\n select distinct ?rs ?session ?name ?number ?pid ?sitename\n where {\n BIND (<%s> AS ?participant)\n \n ?rs rdf:type austalk:RecordedSession .\n ?rs olac:speaker ?participant .\n \n ?participant austalk:id ?pid .\n ?participant austalk:recording_site ?site .\n ?site rdfs:label ?sitename .\n \n ?rs austalk:prototype ?session .\n ?session austalk:name ?name .\n ?session austalk:id ?number .\n }\n ORDER BY ?name\"\"\" % participant.identifier)\n \n results = []\n\n for result in sparql_results[\"results\"][\"bindings\"]:\n results.append (Session (\n client = self.client,\n identifier = result[\"rs\"][\"value\"],\n prototype = result[\"session\"][\"value\"],\n name = result[\"name\"][\"value\"],\n number = result[\"number\"][\"value\"],\n site = result[\"sitename\"][\"value\"],\n participantId = result[\"pid\"][\"value\"]))\n\n return results" ]
[ "0.6473012", "0.6200664", "0.5929303", "0.59278613", "0.59198064", "0.5768752", "0.5707701", "0.5612031", "0.5579454", "0.5469084", "0.5455913", "0.54373837", "0.5435617", "0.5366657", "0.53431135", "0.5327053", "0.5318471", "0.5302202", "0.5292539", "0.5275686", "0.5255918", "0.5215425", "0.5215425", "0.5196219", "0.51787883", "0.51783556", "0.5156677", "0.5156646", "0.5144437", "0.51441705" ]
0.67707795
0
For each patient in the practitioner's monitored patient list, retrieve their data from the server
def get_patient_data(self, client): for patient in self._monitored_patients.get_patient_list(): # print("Requesting data for " + patient.first_name+" "+patient.last_name+"...") patient.update_data(client.get_patient_data(patient.id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_patient_list(self, client):\n self._patient_list = client.get_patient_list(self.id)", "def get_patients(self):\n return", "def see_all():\n database = get_connection()\n patients_in_db = []\n patient: dict = database.patients.find()\n for p in patient:\n pat = p[\"patient_data\"]\n patients_in_db.append(pat)\n print(patients_in_db)\n return patients_in_db", "def get_patient_list(self):\n return self._patient_list", "def patients(request):\n try:\n doctor = Doctor.objects.get(user=request.user)\n except Doctor.DoesNotExist:\n raise Http404(\"Doctor with current user instance not found!\")\n\n #filter patients from asymptomatics\n patient_objects = doctor.patients.all()\n patients = []\n\n for patient in patient_objects:\n if patient.asymptomatic == False:\n patients.append(patient)\n\n\n #now get the doctors patient instances\n patients = get_patients_list(patients)\n\n return JsonResponse({\n \"patients\": patients\n })", "def get(self):\n all_patients = model_patient.query.all()\n return jsonify(all_patients)", "def get_patient_cases(patient):\n # ----- Get database connection\n db = connect_to_db()\n try:\n c1 = db.cursor()\n try:\n c1.execute(\n \"\"\"SELECT tc.SLABEL \"\"\"\n \"\"\"FROM BOM.PATIENT pt \"\"\"\n \"\"\" INNER JOIN BOM.TCASE tc ON pt.SUID = tc.SPATIENTUID \"\"\"\n \"\"\"WHERE \"\"\"\n \"\"\" pt.SID = '%s' \"\"\" %\n patient)\n res = c1.fetchall()\n cases = []\n for re in res:\n cases.append(re[0])\n finally:\n c1.close()\n finally:\n db.close()\n return cases", "def get_patient_dict():\r\n return common.get_dict_all(get_patient_filename(), None)", "def get_meeting_registration_data(meeting):\n num_created = 0\n num_processed = 0\n response = requests.get(settings.STATS_REGISTRATION_ATTENDEES_JSON_URL.format(number=meeting.number))\n if response.status_code == 200:\n decoded = []\n try:\n decoded = response.json()\n except ValueError:\n if response.content.strip() == 'Invalid meeting':\n pass\n else:\n raise RuntimeError(\"Could not decode response from registrations API: '%s...'\" % (response.content[:64], ))\n\n\n # for each user identified in the Registration system\n # Create a DataTracker MeetingRegistration object\n for registration in decoded:\n person = None\n # capture the stripped registration values for later use\n first_name = registration['FirstName'].strip()\n last_name = registration['LastName'].strip()\n affiliation = registration['Company'].strip()\n country_code = registration['Country'].strip()\n address = registration['Email'].strip()\n object, created = MeetingRegistration.objects.get_or_create(\n meeting_id=meeting.pk,\n first_name=first_name[:200],\n last_name=last_name[:200],\n affiliation=affiliation,\n country_code=country_code,\n email=address,\n )\n\n # Add a Person object to MeetingRegistration object\n # if valid email is available\n if object and not object.person and address:\n # If the person already exists do not try to create a new one\n emails = Email.objects.filter(address=address)\n # there can only be on Email object with a unique email address (primary key)\n if emails.exists():\n person = emails.first().person\n # Create a new Person object\n else:\n # Normalize all-caps or all-lower entries. 
Don't touch\n # others, there might be names properly spelled with\n # internal uppercase letters.\n if ( ( first_name == first_name.upper() or first_name == first_name.lower() )\n and ( last_name == last_name.upper() or last_name == last_name.lower() ) ):\n first_name = first_name.capitalize()\n last_name = last_name.capitalize()\n regname = \"%s %s\" % (first_name, last_name)\n # if there are any unicode characters decode the string to ascii\n ascii_name = unidecode_name(regname)\n\n # Create a new user object if it does not exist already\n # if the user already exists do not try to create a new one\n users = User.objects.filter(username=address)\n if users.exists():\n user = users.first()\n else:\n # Create a new user.\n user = User.objects.create(\n first_name=first_name[:30],\n last_name=last_name[:30],\n username=address,\n email=address,\n )\n if user.first_name != first_name:\n debug.say(\"Truncated first name: %s --> %s\" % (first_name, user.first_name))\n if user.last_name != last_name:\n debug.say(\"Truncated last name: %s --> %s\" % (last_name, user.last_name))\n\n aliases = Alias.objects.filter(name=regname)\n if aliases.exists():\n person = aliases.first().person\n else:\n # Create the new Person object.\n person = Person.objects.create(\n name=regname,\n ascii=ascii_name,\n user=user,\n )\n\n # Create an associated Email address for this Person\n try:\n email = Email.objects.get(person=person, address=address[:64])\n except Email.DoesNotExist:\n email = Email.objects.create(person=person, address=address[:64], origin='registration: ietf-%s'%meeting.number)\n if email.address != address:\n debug.say(\"Truncated address: %s --> %s\" % (address, email.address))\n\n # If this is the only email address, set primary to true.\n # If the person already existed (found through Alias) and\n # had email addresses, we don't do this.\n if Email.objects.filter(person=person).count() == 1:\n email.primary = True\n email.save()\n\n # update the person object to an actual value\n object.person = person\n object.save()\n \n if created:\n num_created += 1\n num_processed += 1\n else:\n raise RuntimeError(\"Bad response from registrations API: %s, '%s'\" % (response.status_code, response.content))\n num_total = MeetingRegistration.objects.filter(meeting_id=meeting.pk).count()\n if meeting.attendees is None or num_total > meeting.attendees:\n meeting.attendees = num_total\n meeting.save()\n return num_created, num_processed, num_total", "def load_patient_data():\n data_file = open(\"test_data.txt\", \"r\")\n still_finding_patients = True\n my_patients = []\n while still_finding_patients is True:\n name_line = next(data_file)\n if name_line != \"END\":\n name_line = name_line.split()\n fname = name_line[0]\n lname = name_line[1]\n age = next(data_file).strip()\n gender = next(data_file).strip().casefold()\n tsh_data = next(data_file)\n tsh_data = tsh_data.strip().split(\",\")\n tsh_data.remove(\"TSH\")\n new_patient = create_patient(fname, lname, age, gender, tsh_data)\n my_patients.append(new_patient)\n else:\n still_finding_patients = False\n data_file.close()\n return my_patients", "def show_all_records(request, patient_id):\n if (request.user.patient_username.id != patient_id):\n Logs.objects.create(type='READ', user_id=request.user.uid, interface='PATIENT', status=STATUS_ERROR, details='[Show All Records] Logged in user does not match ID in URL. 
URL ID: ' + str(patient_id))\n return redirect('/patient/login/')\n\n patient = patient_does_not_exists(patient_id)\n\n # Get all records from Readings, TimeSeries, Documents, Images, and Videos\n readings = Readings.objects.filter(patient_id=patient)\n timeseries = TimeSeries.objects.filter(patient_id=patient)\n documents = Documents.objects.filter(patient_id=patient).exclude(type='Healthcare Professional Note')\n images = Images.objects.filter(patient_id=patient)\n videos = Videos.objects.filter(patient_id=patient)\n\n results = list(chain(readings, timeseries, documents, images, videos))\n\n Logs.objects.create(type='READ', user_id=patient.username.uid, interface='PATIENT', status=STATUS_OK, details='Show All Records')\n\n context = {\n 'patient': patient,\n 'results': results\n }\n\n return render(request, 'show_all_records.html', context)", "def find_data(id_list):\n people_list = []\n for id in id_list:\n new_person = Query()\n new_person.id = id\n new_person.find_details()\n people_list.append(new_person)\n \n return people_list", "def getPatients(self):\n if not self.patients.choices:\n db = DBStorage()\n options = []\n for patient in db.all_patients():\n options.append((patient.id, '{} {}'.format(\n patient.name, patient.last_name)))\n self.patients.choices = options\n self.patients.default = 1", "def recovered(request):\n try:\n doctor = Doctor.objects.get(user=request.user)\n except Doctor.DoesNotExist:\n raise Http404(\"Doctor with current user instance not found!\")\n\n #first ge the doctors patients\n patients = doctor.patients.all()\n\n #filter those that are asymptomatic to be in recovered\n recovered = []\n for patient in patients:\n if patient.asymptomatic == True:\n recovered.append(patient)\n else:\n pass \n\n return JsonResponse({\n \"recovered\": get_patients_list(recovered)\n })", "def analysis():\n\n response_all_doctors_and_appointments = requests.post(server_url + 'doctor/all_doctors_and_all_appointments')\n doctors_and_appointments = response_all_doctors_and_appointments.json()\n\n return render_template('clerks/analysis.html', doctors_and_appointments=doctors_and_appointments)", "def perform_patient_audit(self):\n \n # Continuous audit\n while True:\n \n # General patient audit (put audit results in dictionary and add to audit DataFrame)\n audit = dict()\n audit['day'] = self._env.now\n audit['negative'] = len(self._pop.negative_patients)\n audit['positive'] = len(self._pop.positive_patients)\n audit['recovered'] = len(self._pop.recovered_patients)\n audit['inpatient'] = len(self._pop.inpatients)\n audit['died'] = len(self._pop.died_patients)\n audit['total'] = len(self._pop.patients)\n audit['unallocated'] = len(self._pop.unallocated_patients)\n # Add dictionary to existing DataFrame\n self.patient_audit = \\\n self.patient_audit.append(audit, ignore_index=True)\n \n # Displaced patient audit (put audit results in dictionary and add to audit DataFrame)\n audit = dict()\n # Get displaced times\n additional_time = []\n for patient in self._pop.displaced_patients:\n additional_time.append(patient.displaced_additional_time)\n audit['day'] = self._env.now\n audit['number'] = len(self._pop.displaced_patients)\n if len(additional_time) > 0:\n # Dispalced patients exist, calculate statistics\n audit['add_time_min'] = np.min(additional_time)\n audit['add_time_1Q'] = np.quantile(additional_time, 0.25)\n audit['add_time_median'] = np.quantile(additional_time, 0.50)\n audit['add_time_3Q'] = np.quantile(additional_time, 0.75)\n audit['add_time_max'] = 
np.max(additional_time)\n audit['add_time_total'] = np.sum(additional_time)\n else:\n # No displaced patients exist, set all statistics to zero\n audit['add_time_min'] = 0\n audit['add_time_1Q'] = 0\n audit['add_time_median'] = 0\n audit['add_time_3Q'] = 0\n audit['add_time_max'] = 0\n audit['add_time_total'] = 0\n # Add dictionary to existing DataFrame\n self.displaced_audit = \\\n self.displaced_audit.append(audit, ignore_index=True)\n \n # Trigger next audit after interval\n yield self._env.timeout(self._params.audit_interval)", "def all_participants_data(study_name: str):\n # get all participants' name-ids\n participants = CC_driver.get_all_participants(study_name)\n\n if len(participants) > 0:\n participants_rdd = CC_driver.sc.parallelize(participants)\n results = participants_rdd.map(\n lambda participant: diagnose_pipeline(participant[\"identifier\"], CC_worker, config))\n results.count()\n else:\n print(study_name, \"- Study contains no participant.\")", "def get_patient_status():\n r = requests.get(\"http://vcm-7474.vm.duke.edu:5000/api/heart_rate/3\")\n print(r.text)", "def measurements():\n print(\"server received request for precipitation data...\")\n return jsonify(measurements_data)", "def get_participants_data(self):\n participants = []\n for (email, uid) in self.tokens.items():\n participant = {} \n participant['uid'] = uid\n participant['email'] = email\n response = 0\n questions = 0\n sections = [x for x in self.values() if ISurveySection.providedBy(x)]\n for section in sections:\n response += len(section.responses.get(uid, {}))\n questions += len(section.question_ids)\n if response != 0:\n participant['finished'] = Decimal(response) / Decimal(questions) * 100\n else:\n participant['finished'] = 0 \n participants.append(participant)\n return participants", "def hospital_list(self, request, **dict):\n\t\tdata = self.get_serializer(self.get_queryset(), many=True).data\n\t\treturn Response(data, status.HTTP_200_OK)", "def test_multi_patient_creation(self):\n node1 = self.create_xml_patient()\n node2 = self.create_xml_patient()\n node3 = self.create_xml_patient()\n payload = self.create_payload([node1, node2, node3])\n parse_payload(payload)\n payload = reminders.PatientDataPayload.objects.all()[0]\n self.assertEqual(payload.status, 'success')\n self.assertEqual(payload.patients.count(), 3)", "def measurements_lookup(client, database):\n client.switch_database(database)\n mlist_dict = client.get_list_measurements()\n # print(\"def measurements_lookup 010:\", mlist_dict[:10])\n return mlist_dict", "def parse_patient_data(input_file, output_file):\n rows = []\n if os.path.exists(input_file):\n with open(input_file) as f:\n data = json.loads(f.read())\n for entry in data.get('entry', []):\n url = entry.get('fullUrl', '')\n resource = entry.get('resource', {})\n if not resource:\n print('resource not found for url %s in %s: please check' % (url, in_file), file=sys.stderr)\n continue\n\n resource_type = resource.get('resourceType', '').lower()\n if resource_type != 'patient':\n print('resource type is not patient for url %s in %s: please check' % (url, in_file), file=sys.stderr)\n continue\n\n resource_id = resource.get('id', '')\n last_updated = resource.get('meta', {}).get('lastUpdated', '')\n status = resource.get('text', {}).get('status', '').lower()\n system_id = resource.get('text', {}).get('value')\n active = resource.get('active', False)\n first_name = resource.get('name', {})[0].get('given', [''])[0]\n last_name = resource.get('name', {})[0].get('family', [''])\n phone = 
resource.get('telecom', [{}])[0].get('value', '')\n gender = resource.get('gender', '')\n address = resource.get('address', [{}])[0].get('value', '')\n\n rows.append([url, resource_id, last_updated, status, system_id, active, first_name, last_name, phone, gender, address])\n\n with open(output_file, 'w') as f:\n writer = csv.writer(f, dialect=csv.excel)\n writer.writerow(['url', 'resource_id', 'last_updated', 'status', 'system_id', 'active', 'first_name', 'last_name', 'phone', 'gender', 'address'])\n for row in rows:\n writer.writerow(row)", "def get_patients_in_ic(self):\n\n query = \"SELECT * FROM patients WHERE datetime_discharge IS NULL\"\n\n return self.mysql_obj.fetch_rows(query)", "def fetchTAC(self):\n\n last_hour = datetime.datetime.now().date() - datetime.timedelta(hours = 1)\n last_hour = \"{}{}{}\".format(\"'\", last_hour, \"'\")\n last_hour = datetime.date(2011, 4, 5)\n\n self.hlr_cur.execute(\"SELECT id FROM Subscriber WHERE updated >= {date};\".format(date = last_hour))\n subscribers = self.hlr_cur.fetchall()\n\n parsed_data = {}\n unique_imei = {}\n #uid_count = 0\n\n for subscriber in subscribers:\n self.hlr_cur.execute(\"SELECT IMEI FROM Equipment WHERE id = (SELECT equipment_id FROM EquipmentWatch WHERE subscriber_id = {s_id});\".format(s_id = subscriber[0]))\n parsed_imei = self.hlr_cur.fetchall()\n\n if len(parsed_imei) > 0:\n for imei in parsed_imei:\n imei_number = imei[0] \n\n if imei_number not in unique_imei:\n unique_imei[imei_number] = subscriber[0]\n\n uid = unique_imei[imei_number]\n parsed_data.setdefault((uid), str(imei_number)[:8])\n\n self.saveRecords(parsed_data)", "def load_patient(self, patient):\n \n # Maintain patient count \n self.count_total_patients += 1\n \n # Allocate patient to subunit/session\n self.allocate_patient(patient)\n \n # Add to appropriate _population lists\n patient_dict = {'negative': self._pop.negative_patients, \n 'positive': self._pop.positive_patients,\n 'recovered': self._pop.recovered_patients,\n 'died': self._pop.died_patients}\n \n # Add to population dictionary of all patients\n patient_dict[patient.status].append(patient)", "def get_members(self):\n return sorted([x[\"patient\"] for x in self.get_filtered_pedigree_with_samples()])", "def results(self):\n q = self.cleaned_data['q'].strip()\n patients = PatientInformation.objects.filter(Q(operator__username__contains=q) | \\\n Q(patient_id__contains=q) | Q(first_name__contains=q) | Q(last_name__contains=q) | \\\n Q(email__contains=q)).distinct()\n return patients", "def _get_data(self):\n devices = []\n try:\n if not self.router_client.login():\n self.hass.states.set(f\"{DOMAIN}.statusmsg\", self.router_client.statusmsg)\n _LOGGER.warning(\"Login failed: {0}:{1}@{2}\".format(self.router_client.username, self.router_client.password,self.router_client.host))\n self.router_client.logout()\n return devices\n\n devices_json = self.router_client.get_devices_response()\n finally:\n self.router_client.logout()\n\n self.hass.states.set(f\"{DOMAIN}.scanning\", devices_json != False)\n\n if devices_json != False:\n for device in devices_json:\n # _LOGGER.debug(\"Device: {0}\".format(device))\n dev = Device(\n device['HostName'].replace('未知设备', 'Unknown'),\n device['IPAddress'],\n device['MACAddress'],\n device['Active'],\n ICONS.get(device['IconType'])\n )\n # _LOGGER.debug(\"Device: {0}\".format(dev))\n devices.append(dev)\n return devices\n else:\n return []" ]
[ "0.6629279", "0.6568286", "0.65445", "0.6223222", "0.60501635", "0.5896784", "0.5879684", "0.57728606", "0.565185", "0.56371266", "0.5596385", "0.5589422", "0.5585452", "0.5550535", "0.55420274", "0.5493593", "0.5476058", "0.54563004", "0.5411978", "0.5362406", "0.5329139", "0.5294736", "0.5255513", "0.52436924", "0.5242369", "0.5239937", "0.5239658", "0.52378875", "0.5230902", "0.52190864" ]
0.7766716
0
Add a patient from the list of all patients to the monitored patients list
def add_patient_monitor(self, patient_name): for patient in self._patient_list.get_patient_list(): if patient_name == patient.first_name + " " + patient.last_name: self._monitored_patients.add_patient(patient)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_patient(self, patient):\n if isinstance(patient, Patient):\n self._patient_list.append(patient)\n self.calculate_avg_cholesterol()", "def update_patients(self, list):\n\n self.llista.delete(0, tk.END)\n for i in range(len(list)):\n self.llista.insert(tk.END, list[i])\n self.llista.bind('<Double-1>', self.select_patient)", "def get_patient_data(self, client):\n for patient in self._monitored_patients.get_patient_list():\n # print(\"Requesting data for \" + patient.first_name+\" \"+patient.last_name+\"...\")\n patient.update_data(client.get_patient_data(patient.id))", "def clear_monitor(self):\n self._monitored_patients = PatientList()", "def get_patient_list(self, client):\n self._patient_list = client.get_patient_list(self.id)", "def get_patient_list(self):\n return self._patient_list", "def get_patients(self):\n return", "def connect_monitor_devices_to_daq(self):\n scan = self.measure['scan']\n devices_to_monitor = scan['detectors']\n\n # Clear the DAQS just in case is a second scan running\n for d in self.daqs:\n self.daqs[d]['monitor'] = []\n\n for d in devices_to_monitor:\n dev = self.devices[d]\n self.daqs[dev.properties['connection']['device']]['monitor'].append(dev)", "def allocate_inpatient(self, patient):\n\n units_to_check = self.unit_order_by_patient_postcode.loc[patient.location] \n\n for unit_to_check in units_to_check:\n if self.inpatient_units.loc[unit_to_check].values[0] == 1:\n patient.current_unit = unit_to_check\n self.inpatient_counts.loc[unit_to_check]['inpatients'] += 1\n break", "def _schedule(self):\n name=input(\"\\nEnter the patient's name\")\n condition=self._getCondition()\n self._model.add(Patient(name,condition))\n print(name,\"is added to the \",str(condition),\" list\\n\")", "def append_diagnostic(self, diagnostic):\n self._diagnostics_list.append(diagnostic)", "def push_addr_reservation_list(self, lst_new):\n self.__not_implemented()", "def load_patient(self, patient):\n \n # Maintain patient count \n self.count_total_patients += 1\n \n # Allocate patient to subunit/session\n self.allocate_patient(patient)\n \n # Add to appropriate _population lists\n patient_dict = {'negative': self._pop.negative_patients, \n 'positive': self._pop.positive_patients,\n 'recovered': self._pop.recovered_patients,\n 'died': self._pop.died_patients}\n \n # Add to population dictionary of all patients\n patient_dict[patient.status].append(patient)", "def addAllergies(self):\n if int(self.pid)%100 < 85: # no allergies for ~ 85%\n exclusion = NO_ALLERGY.sub({\n 'exclusion':\"no known allergies\",\n 'exclusion_id':\"160244002\",\n }).done()\n self.data.append(SDMX.sub({'models':exclusion}, escape=False).done())\n else: # Sprinkle in some sulfa allergies\n al = DRUG_CLASS_ALLERGY.sub({\n 'reaction': \"skin rash\",\n 'reaction_id': \"271807003\",\n 'category': \"drug allergy\",\n 'category_id': \"416098002\",\n 'allergen': \"sulfonamide antibacterial\",\n 'allergen_id': \"N0000175503\",\n 'severity': \"mild\",\n 'severity_id': \"255604002\",\n }).done()\n self.data.append(SDMX.sub({'models':al}, escape=False).done())\n \n if int(self.pid)%2: # and throw in peanut allergies for every other patient\n al = FOOD_ALLERGY.sub({\n 'reaction': \"anaphylaxis\",\n 'reaction_id': \"39579001\",\n 'category': \"food allergy\",\n 'category_id': \"414285001\",\n 'allergen': \"peanut\",\n 'allergen_id': \"QE1QX6B99R\",\n 'severity': \"severe\",\n 'severity_id': \"24484000\",\n }).done()\n self.data.append(SDMX.sub({'models':al}, escape=False).done())", "def perform_patient_audit(self):\n 
\n # Continuous audit\n while True:\n \n # General patient audit (put audit results in dictionary and add to audit DataFrame)\n audit = dict()\n audit['day'] = self._env.now\n audit['negative'] = len(self._pop.negative_patients)\n audit['positive'] = len(self._pop.positive_patients)\n audit['recovered'] = len(self._pop.recovered_patients)\n audit['inpatient'] = len(self._pop.inpatients)\n audit['died'] = len(self._pop.died_patients)\n audit['total'] = len(self._pop.patients)\n audit['unallocated'] = len(self._pop.unallocated_patients)\n # Add dictionary to existing DataFrame\n self.patient_audit = \\\n self.patient_audit.append(audit, ignore_index=True)\n \n # Displaced patient audit (put audit results in dictionary and add to audit DataFrame)\n audit = dict()\n # Get displaced times\n additional_time = []\n for patient in self._pop.displaced_patients:\n additional_time.append(patient.displaced_additional_time)\n audit['day'] = self._env.now\n audit['number'] = len(self._pop.displaced_patients)\n if len(additional_time) > 0:\n # Dispalced patients exist, calculate statistics\n audit['add_time_min'] = np.min(additional_time)\n audit['add_time_1Q'] = np.quantile(additional_time, 0.25)\n audit['add_time_median'] = np.quantile(additional_time, 0.50)\n audit['add_time_3Q'] = np.quantile(additional_time, 0.75)\n audit['add_time_max'] = np.max(additional_time)\n audit['add_time_total'] = np.sum(additional_time)\n else:\n # No displaced patients exist, set all statistics to zero\n audit['add_time_min'] = 0\n audit['add_time_1Q'] = 0\n audit['add_time_median'] = 0\n audit['add_time_3Q'] = 0\n audit['add_time_max'] = 0\n audit['add_time_total'] = 0\n # Add dictionary to existing DataFrame\n self.displaced_audit = \\\n self.displaced_audit.append(audit, ignore_index=True)\n \n # Trigger next audit after interval\n yield self._env.timeout(self._params.audit_interval)", "def append_matching_units_attendance(self, units_list):\n\n data = gpd.GeoDataFrame(self._raw_data).copy()\n # convert bool in GeoDataFrame to str in order to save it\n for col in data.columns:\n if data[col].dtype in (np.bool, bool):\n data[col] = data[col].astype(str)\n # build geometry column\n long_col, lat_col = common_cfg.coord_col_names\n data = data.set_geometry([shapely.geometry.Point(xy) for xy in zip(\n data[long_col], data[lat_col])])\n\n # append attendance\n compatible_units = [u for u in units_list if\n u.service == self.servicetype]\n if compatible_units:\n unit_frame = pd.DataFrame(\n {self.id_col: [u.id for u in compatible_units],\n 'Affluenza': [u.attendance for u in compatible_units]})\n data = data.merge(unit_frame, on=self.id_col)\n\n return data", "def add_people(self, people_list):\n\n for person in people_list:\n self.add_person(person)", "def person_list(self, new_persons_list):\n self._person_list = new_persons_list\n self.__save_persons_from_memory_to_file()", "def appendedEntries(self):\n self.contact_list.append({\"name\": self.first_name.title() + \" \" + self.last_name.title(), \"phone number\": self.phone_number, \"phone number type\": self.phone_number_type})", "def append_collectable(self, newnotes: List):\n self.notes.extend(newnotes)", "def _update_all_devices(self):\n self.all_devices = []\n self.all_devices.extend(self.keyboards)\n self.all_devices.extend(self.mice)\n self.all_devices.extend(self.gamepads)\n self.all_devices.extend(self.other_devices)", "def 
add_to_memory(count):\n\t\tSimulation.past_attendance.append(count)\n\t\tPerson.add_to_memory(count)\n\t\tSimulation.simulation_progression.write(str(Person.recent_memory))\n\t\tSimulation.simulation_progression.write(\"\\n\")", "def getPatients(self):\n if not self.patients.choices:\n db = DBStorage()\n options = []\n for patient in db.all_patients():\n options.append((patient.id, '{} {}'.format(\n patient.name, patient.last_name)))\n self.patients.choices = options\n self.patients.default = 1", "def sync_all_lists(self):\r\n print(\"Started syncing influencer master lists with DB\")\r\n screen_names_on_lists = []\r\n self._add_or_update(screen_names_on_lists)\r\n print(\"Removing entries which are no longer on any list\")\r\n self._delete_entries_not_in_list(screen_names_on_lists) # remove entries from DB if they are on no list\r\n print(\"Sync complete\")", "def OnParticipantsChanged(properties, context):\n added = properties['participantsAdded']\n for party in added:\n Notify(context, party)", "def OnParticipantsChanged(properties, context):\n added = properties['participantsAdded']\n for p in added:\n Notify(context)", "def OnParticipantsChanged(properties, context):\n added = properties['participantsAdded']\n for p in added:\n Notify(context)", "def add(self, key, values):\n self.watchlists[key] = list(enumerate(values))", "def add_single_patient_rounds(self, rounds_list):\n patients_list = self._problem.patients_list\n busy_nurses = [rnd.nurse for rnd in rounds_list]\n available_nurses = [nurse for nurse in self._problem.nurses_list if nurse not in busy_nurses]\n for patient in patients_list:\n visited = False\n for d in rounds_list:\n if patient in d.patients_list:\n visited = True\n break\n if not visited:\n new_round = Round([patient], problem=self._problem)\n for nurse in available_nurses:\n if new_round.can_be_assigned_to(nurse):\n new_round.nurse = nurse\n rounds_list.append(new_round)\n available_nurses.remove(nurse)\n busy_nurses.append(nurse)\n break", "def append(self, dat, aduc):\n self.datelist.append(dat)\n self.adulist.append(aduc)" ]
[ "0.6347838", "0.6320095", "0.6128955", "0.6099652", "0.5824647", "0.5669928", "0.5381946", "0.5372163", "0.5362081", "0.53102386", "0.5252708", "0.52468544", "0.5185292", "0.50818354", "0.50702804", "0.5063018", "0.5057603", "0.50574607", "0.5031157", "0.50095963", "0.5009006", "0.49947184", "0.49762025", "0.4949311", "0.49435604", "0.4919855", "0.4919855", "0.49064505", "0.49038187", "0.488221" ]
0.74789894
0
Remove a patient from the monitored patients list
def remove_patient_monitor(self, patient_name): self._monitored_patients.remove_patient(patient_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_monitor(self):\n self._monitored_patients = PatientList()", "def remove_monitor(monitor_id):\n g = mongo.db[app.config['GLOBAL_COLLECTION']]\n gdata = g.find_one(dict(), {'_id': 0})\n print(monitor_id)\n ga = GoogleAlerts(gdata['email'], gdata['password'])\n ga.authenticate()\n ga.delete(monitor_id)", "def remove_patient(self, patient_name):\n for i in range(len(self)):\n selected_patient = self._patient_list[i]\n if patient_name == selected_patient.first_name + \" \" + selected_patient.last_name:\n self._patient_list.pop(i)\n self.calculate_avg_cholesterol()\n return True\n return False", "def remove_measurement(self):\n idx = self.measurementsListWidget.currentRow()\n if len(self.mgr.obj.measurements) > 0:\n key = list(self.mgr.obj.measurements)[idx]\n del self.mgr.obj.measurements[key]\n\n # Flag the Survey as changed\n self.mgr.changed = True\n\n # Refresh lists/tables\n self.load_measurements()\n nmeas = len(self.mgr.obj.measurements)\n if nmeas > 0:\n self.measurementsListWidget.setCurrentRow(min(idx, nmeas-1))", "def remove_signal_monitor(self, mon):\n if mon not in self._monitored_signals:\n raise KeyError(\"signal monitor {} doesn't exist\".format(mon))\n uid,_=self._monitored_signals.pop(mon)\n self.unsubscribe(uid)", "def remove_reminder(self, reminder_info):\n store = self.load_data(default=[])\n if reminder_info in store:\n index = store.index(reminder_info)\n del store[index]\n self.save_data(store)", "def remove():", "def remove_patient(self, patient):\n \n # Remove patient from subunit/session (unless not allocated to subunit/session)\n if not patient.unallocated_to_session:\n self.unit_sessions_count.loc[patient.current_unit][patient.session] -= 1\n \n # Remove from appropriate _population lists (use dict to select appropriate list)\n patient_dict = {'negative': self._pop.negative_patients, \n 'positive': self._pop.positive_patients,\n 'recovered': self._pop.recovered_patients}\n patient_dict[patient.status].remove(patient)\n \n # Reset patient displacement from default unit\n patient.displaced = False\n patient.displaced_additional_time = True\n if patient in self._pop.displaced_patients:\n self._pop.displaced_patients.remove(patient)\n\n # If positive patient check if session can be re-allocated to cov negative\n if patient.status == 'positive' and patient.session != 'none':\n if self.unit_sessions_count.loc[patient.current_unit][patient.session] == 0:\n self.unit_sessions.loc[patient.current_unit][patient.session] = 'negative'", "def unmeetLoc(self, loc):\n \n to_remove = None\n for i in range(len(self.metOm)):\n if self.metOm[i][1] == loc:\n to_remove = self.metOm[i]\n if to_remove != None:\n self.metOm.remove(to_remove)\n # print(f\"\\n{to_remove[0].name} was removed from banned list because bus reached {loc}\\n\")", "def remove(self):\n\t\tcall_sdk_function('PrlVmDev_Remove', self.handle)", "def schedule_log_path_for_removal(self, monitor_name, log_path):\n pass", "def removeActorList(self, actorList):\n pass", "def del_ist(self, dev_ind: int) -> None:\n self._ist_mut.acquire()\n self.ist = list(filter(lambda i: i.indi != dev_ind, self.ist))\n if not self.ist:\n self.kill()\n self._ist_mut.release()", "def remove_device(self, path):\n pass", "def remove(self):", "def OnDeviceRemoval(self, serial_number):\r\n cam_list = self.system.GetCameras()\r\n count = cam_list.GetSize()\r\n print('System event handler:')\r\n print('\\tDevice %i was removed from the system.' % serial_number)\r\n print('\\tThere %s %i %s on the system.' 
% ('is' if count == 1 else 'are',\r\n count,\r\n 'device' if count == 1 else 'devices'))", "def remove(self, measurements):\n if not isinstance(measurements, Iterable):\n measurements = [measurements]\n\n notification = ContainerChange(obj=self, name='measurements')\n for measurement in measurements:\n old = self.measurements.index(measurement)\n del self.measurements[old]\n notification.add_operation('removed', (old, measurement))\n\n self.changed(notification)", "def remove_by_person_id(self, p_id):\r\n self.__repo.remove_appointment_by_person_id(p_id)", "async def remove(self, probes: Iterable[Probe]) -> None:\n for probe in probes:\n schedule = self.index.pop(probe, None)\n if schedule is not None:\n logger.info('probe-remove: %s', schedule.probe)\n # We mark a schedule as removed here and ignore it later\n schedule.removed = True\n async with self.queue_changed:\n self.queue_changed.notify()", "def removePlayer(self, index):\n serial = self.seats[index]\n self.seats[index]=0\n if serial in self.players:\n del self.players[serial]", "def collect(self, pc, controls):\n self.delete()", "def remove_measurement():\n meas_id = request.args.get('id', type=int)\n if meas_id is not None:\n db.session.query(Measurement).filter(Measurement.id == meas_id).delete()\n db.session.commit()\n\n return redirect('/measurements')", "def remove_reminders(self, reminder_infos):\n for reminder_info in reminder_infos:\n self.remove_reminder(reminder_info)", "def remove_log_path(self, monitor_name, log_path):\n pass", "def remove_item(self, uuid):\n super(ListTile, self).remove_item(uuid) # check permission\n data_mgr = ITileDataManager(self)\n old_data = data_mgr.get()\n uuids = data_mgr.get()[\"uuids\"]\n if uuid in uuids.keys():\n del uuids[uuid]\n old_data[\"uuids\"] = uuids\n data_mgr.set(old_data)", "def delete(id_patient: str):\n database = get_connection()\n col = database.patients\n query = {\"patient_data.id\": id_patient}\n col.delete_one(query)", "def deleteCreatedBy(self, caller):\n\t\tnewListeners = []\n\t\tfor lis in self.allListeners:\n\t\t\tif lis.caller == caller:\n\t\t\t\tlis.unregister()\n\t\t\telse:\n\t\t\t\tnewListeners.append(lis)\n\t\tself.allListeners = newListeners", "def on_remove_clicked(self):\n selected_indexes = self.ui.attendListView.selectedIndexes()\n for index in selected_indexes:\n row = self.attendModel.itemFromIndex(index).row()\n student = self.attendModel.item(row, 0).text()\n sid = self.attendModel.item(row, 1).text()\n try:\n # Actually add the student for the date into the database\n self.db.student_deattend(sid, self.date_string)\n except KeyError:\n # Display error window if student missing\n err_msg = QtGui.QErrorMessage()\n err_msg.showMessage(\"Sid not found for student %s\" % student)\n\n self.update_views()", "def removePlayer(self, index):\n\n self.eloList.pop(index)\n self.idList.pop(index)", "def removeDrone(self, myDrone):\n self.drones.remove(myDrone)" ]
[ "0.7407303", "0.65054905", "0.6071041", "0.6051505", "0.5979856", "0.58976924", "0.58892506", "0.5792517", "0.5757586", "0.5683679", "0.5680123", "0.56612855", "0.5654478", "0.5638736", "0.56229955", "0.5587177", "0.55870295", "0.55831707", "0.5560551", "0.5553754", "0.55349624", "0.55277735", "0.54878193", "0.54734546", "0.5463492", "0.5457196", "0.54446286", "0.54313", "0.5430534", "0.54100776" ]
0.7048552
1
Reset the monitored patients list
def clear_monitor(self): self._monitored_patients = PatientList()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resetPlayerList(self):\n self.playerList = []", "def reset_data_recorder(self):\n\n self.t_values = []\n self.x_values = []\n self.tau_values = []", "def reset(self):\n reset_system_health_series()", "def reset(self):\n self._data = []", "def resetUsers():\n global pollResults\n pollResults = dict()\n emitResults()", "def reset(self) -> None:\n\n # Just to be safe, lets make sure no multi-kill timers are gonna go off\n # for no-longer-on-the-list players.\n for p_entry in list(self._player_records.values()):\n p_entry.cancel_multi_kill_timer()\n self._player_records = {}", "def reset(self):\n\n self._problem.reset()\n self._termination_criterion.reset()\n\n self._tabu_list = TabuList(self._list_size)\n\n if self.data is not None:\n self.data = []", "def reset_wm(self):\n\n self.plan = []\n self.hist = []", "def reset(self):\n self.vna.write(edit_list(self.model))\n self.vna.write(clear_list(self.model))", "def reset(self):\n self._open_activity_count = 0\n self._decisions = []\n self._tasks = TaskRegistry()", "def reset_state(self):\n for name in self.metrics:\n self.metrics[name].reset_state()", "def reset(self):\n for provider in self.providers.values():\n provider.reset()\n\n for observation in self.observations.values():\n observation.reset()", "def clear(self):\n self.recorders = set([])\n self.reset()\n\n # Stop any currently running SpiNNaker application\n self.stop()", "def clear(self):\n self.__list = []", "def reset(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].reset()", "def reset(self) -> None:\n self.val = None\n self.notes = []\n self.blocked = False\n self.forbidden = False", "def reset_data(self):\n self.data = []", "def reset(self) -> List[int]:\n pass", "def reset_values(self):\n\n self.values = []", "def reset(self):\n self.observation = None\n self.episode_done = True", "def clear_mem(self):\n dbe.upload_trials(self.trials, self.name)\n self.trials = []", "def reset():\n global pollResults\n for name in pollResults:\n pollResults[name] = {'state': False, 'mark': False}\n emitResults()\n emit('auth_resp', 'New poll has been started', broadcast=True)", "def _clear(self):\n self.events = []\n self.last_on = None\n self.last_off = None", "def reset_all(self) -> None:\n for metric in self:\n metric.reset()", "def reset_next_population(self):\n self.next_population = []", "def resetDeviceStates(self):", "def reset(self):\n self.dict_lock.acquire()\n self.list_lock.acquire()\n\n self.beginResetModel()\n self.levels_dict = {}\n self.view_list = []\n self.endResetModel()\n \n self.list_lock.release()\n self.dict_lock.release()", "def reset(self):\n for k,v in self.events.items():\n self.events[k] = None", "def Reset(self):\n self._results = []", "def reset(self):\n for var in self.var_list:\n var.value = None\n var.domain = copy.deepcopy(var.init_domain)" ]
[ "0.63767904", "0.63581616", "0.6337571", "0.6307555", "0.6248766", "0.6225761", "0.62064785", "0.6205428", "0.6204301", "0.6186645", "0.61765885", "0.61660004", "0.60977095", "0.6091881", "0.6090129", "0.60878223", "0.6086508", "0.6082881", "0.6076582", "0.60503036", "0.60502034", "0.6044878", "0.6033283", "0.60316586", "0.6021288", "0.60201925", "0.6019729", "0.6008815", "0.59949225", "0.59815294" ]
0.8055673
0
Add the patient to the patient list and recalculate avg cholesterol
def add_patient(self, patient): if isinstance(patient, Patient): self._patient_list.append(patient) self.calculate_avg_cholesterol()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def avg_num_visits_patient(self):\n pass", "def calculate_avg_cholesterol(self):\n total = 0\n no_of_valid_patients = 0\n for patient in self._patient_list:\n try:\n total += patient.get_cholesterol_data()[0]\n no_of_valid_patients += 1\n except AttributeError:\n continue\n except TypeError:\n continue\n if no_of_valid_patients == 0:\n return 0\n average = total/no_of_valid_patients\n self.average_cholesterol_level = average\n return average", "def add_average_donation(donor_list):\n new_length = (len(donor_list) / 3) * 4\n longer_list = []\n beg = 0\n end = 3\n while len(longer_list) < new_length:\n longer_list.extend(donor_list[beg:end])\n longer_list.append(longer_list[-2] / longer_list[-1])\n beg += 3\n end += 3\n return longer_list", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def avg():\n\n # call sum method to add up the values in the collection & div by the num of items\n # call len method to compute the # of vals in collection which is divided by sum total \n mean = sum(inlist) / len(inlist)\n return mean \n\n # alternate method would be calling the reduce method with lamda \n # return reduce(lambda a, b: a + b, inlist) / len(inlist)", "def movavg(ave_list, length, value):\n ave_list.append(value)\n if length < len(ave_list):\n del ave_list[0]\n value = sum(ave_list)\n return value / len(ave_list)", "def average(self):\n return self.summation() / self.count()", "def data_averaging_and_cleaning(self):\n groups, film, plank = self.data_grouping()\n\n for i in groups:\n self.organized_names.append(input('Enter label name for condition ' + str(i)))\n\n self.organized_film.append(sum(film[groups.index(i)]) / len(film[groups.index(i)]))\n try:\n self.organized_plank.append(sum(film[groups.index(i)]) / (sum(film[groups.index(i)]) +\n sum(plank[groups.index(i)])))\n except ZeroDivisionError:\n self.organized_plank.append(sum(film[groups.index(i)]) / 1)", "def avg(list):\n return sum(list) / len(list)", "def avg_pay(input: list) -> float:\n pay = 0\n for emp in input:\n pay += emp.get_salary()\n return pay / len(input)", "def avg(lst: list):\n return sum(lst) / len(lst)", "def average(lst):\n return sum(lst)/len(lst)", "def donation_avg(donor_list, donor):\n return sum(donor_list[donor]) // len(donor_list[donor])", "def getCurrentAverage(examList, projectList, labList, adjPoints=0):\n \n totalPoints = 1000 if not adjPoints else adjPoints\n grades = examList + projectList + labList # concat into one list to calc the average\n return sum(grades) / totalPoints", "def average_emission(data: List[EmissionPerCapita], current_year: int) -> float:\r\n\r\n index = current_year - data[0].start_year # get the index for current year\r\n\r\n # Get all emissions from that year.\r\n current_year_emissions = []\r\n for countries in data:\r\n current_year_emissions.append(countries.epc_year[index])\r\n\r\n average = sum(current_year_emissions) / len(data)\r\n return average", "def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))", "def _avg(cls, l):\n\n return sum(l) / float(len(l))", "def calcAverage(dat):\n return sum(dat)/len(dat)", "def get_patient_data(self, client):\n for patient in self._monitored_patients.get_patient_list():\n # print(\"Requesting data for \" + patient.first_name+\" \"+patient.last_name+\"...\")\n patient.update_data(client.get_patient_data(patient.id))", "def perform_patient_audit(self):\n \n # Continuous audit\n while True:\n \n # General patient audit (put audit results in dictionary and add to audit 
DataFrame)\n audit = dict()\n audit['day'] = self._env.now\n audit['negative'] = len(self._pop.negative_patients)\n audit['positive'] = len(self._pop.positive_patients)\n audit['recovered'] = len(self._pop.recovered_patients)\n audit['inpatient'] = len(self._pop.inpatients)\n audit['died'] = len(self._pop.died_patients)\n audit['total'] = len(self._pop.patients)\n audit['unallocated'] = len(self._pop.unallocated_patients)\n # Add dictionary to existing DataFrame\n self.patient_audit = \\\n self.patient_audit.append(audit, ignore_index=True)\n \n # Displaced patient audit (put audit results in dictionary and add to audit DataFrame)\n audit = dict()\n # Get displaced times\n additional_time = []\n for patient in self._pop.displaced_patients:\n additional_time.append(patient.displaced_additional_time)\n audit['day'] = self._env.now\n audit['number'] = len(self._pop.displaced_patients)\n if len(additional_time) > 0:\n # Dispalced patients exist, calculate statistics\n audit['add_time_min'] = np.min(additional_time)\n audit['add_time_1Q'] = np.quantile(additional_time, 0.25)\n audit['add_time_median'] = np.quantile(additional_time, 0.50)\n audit['add_time_3Q'] = np.quantile(additional_time, 0.75)\n audit['add_time_max'] = np.max(additional_time)\n audit['add_time_total'] = np.sum(additional_time)\n else:\n # No displaced patients exist, set all statistics to zero\n audit['add_time_min'] = 0\n audit['add_time_1Q'] = 0\n audit['add_time_median'] = 0\n audit['add_time_3Q'] = 0\n audit['add_time_max'] = 0\n audit['add_time_total'] = 0\n # Add dictionary to existing DataFrame\n self.displaced_audit = \\\n self.displaced_audit.append(audit, ignore_index=True)\n \n # Trigger next audit after interval\n yield self._env.timeout(self._params.audit_interval)", "def func(lst):\n tot = 0\n for i in lst:\n tot = tot + i\n avg = tot / len(lst)\n return avg", "def get_patient_average():\n r = requests.get(\"http://vcm-7474.vm.duke.edu:5000/api/heart_rate/average/2\")\n print(r.text)", "def average(self):\n return (self.current + self.last) / 2.0", "def get_average(self, s_freq, e_freq):\n s_ind = self.get_bin(s_freq)\n e_ind = self.get_bin(e_freq)\n lst = self.mags[s_ind:e_ind+1]\n try:\n avg = sum(lst)/len(lst)\n except:\n print(s_ind, e_ind)\n print('werid stuff')\n avg = 0\n return avg", "def append(self, sample):\n self.samples.append(sample)\n self.total += sample\n while len(self.samples) > self.maxlen:\n self.total -= self.samples.popleft()\n self.mean = float(self.total) / len(self.samples)", "def averageDominationCount(leaf):\n averageDominationCount = np.nanmean(leaf.calDominationCount())\n return averageDominationCount", "def redistribute_occ(occ_list):\n\n occ_list_new = occ_list[:]\n\n if sum(occ_list_new) / len(occ_list_new) > 5: # pragma: no cover\n msg = 'Average number of occupants per apartment is higher than 5.' 
\\\n ' This is not valid for usage of Richardson profile generator.'\n raise AssertionError(msg)\n\n # Number of occupants to be redistributed\n nb_occ_redist = 0\n\n # Find remaining occupants\n # ###############################################################\n for i in range(len(occ_list_new)):\n if occ_list_new[i] > 5:\n # Add remaining occupants to nb_occ_redist\n nb_occ_redist += occ_list_new[i] - 5\n # Set occ_list_new entry to 5 persons\n occ_list_new[i] = 5\n\n if nb_occ_redist == 0:\n # Return original list\n return occ_list_new\n\n # Identify empty apartments and add single occupant\n # ###############################################################\n for i in range(len(occ_list_new)):\n if occ_list_new[i] == 0:\n # Add single occupant\n occ_list_new[i] = 1\n # Remove occupant from nb_occ_redist\n nb_occ_redist -= 1\n\n if nb_occ_redist == 0:\n # Return original list\n return occ_list_new\n\n # Redistribute remaining occupants\n # ###############################################################\n for i in range(len(occ_list_new)):\n if occ_list_new[i] < 5:\n # Fill occupants up with remaining occupants\n for j in range(5 - occ_list_new[i]):\n # Add single occupant\n occ_list_new[i] += 1\n # Remove single occupant from remaining sum\n nb_occ_redist -= 1\n\n if nb_occ_redist == 0:\n # Return original list\n return occ_list_new\n\n if nb_occ_redist: # pragma: no cover\n raise AssertionError('Not all occupants could be distributed.'\n 'Check inputs and/or redistribute_occ() call.')", "def CreateAccAvg(self):\n \n if len(self.Accel[\"X\"]) is 0:\n raise ValueError(\"Accel Category is empty\")\n self.AccelAvg = []\n\n for item in range(len(self.Accel[\"X\"])):\n #for axis in [\"X\",\"Y\",\"Z\"]:\n # if type(self.Accel[axis][item]) != type(123.345):\n # raise ValueError(\"non-number included in Accel bank. Use formatAllToFloat() to remove strings.\")\n self.AccelAvg.append((float(self.Accel[\"X\"][item]) + float(self.Accel[\"Y\"][item]) + float(self.Accel[\"Z\"][item])) / 3)", "def add_patient_monitor(self, patient_name):\n for patient in self._patient_list.get_patient_list():\n if patient_name == patient.first_name + \" \" + patient.last_name:\n self._monitored_patients.add_patient(patient)", "def avg(l):\n return (sum(l)/float(len(l)))" ]
[ "0.60794705", "0.5896264", "0.5746469", "0.56437236", "0.5553276", "0.5531608", "0.55159146", "0.54730564", "0.5434685", "0.5412077", "0.5378525", "0.5354515", "0.5350805", "0.5337589", "0.5321583", "0.53007406", "0.529966", "0.5275353", "0.5252502", "0.52446043", "0.5235613", "0.52290595", "0.52273464", "0.5227125", "0.52132195", "0.5205476", "0.5184046", "0.5178534", "0.5176126", "0.5171503" ]
0.7625794
0
Remove patient with the given name
def remove_patient(self, patient_name): for i in range(len(self)): selected_patient = self._patient_list[i] if patient_name == selected_patient.first_name + " " + selected_patient.last_name: self._patient_list.pop(i) self.calculate_avg_cholesterol() return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove(name):", "def remove(name):\n del person_database[name]", "def rm_person():\n # get person name from user\n responses = accept_inputs([\"Person name\"])\n person_name = responses[\"Person name\"]\n # check for existence of person\n results = query_with_results(\"select id from person where name = ?\", [person_name])\n if len(results) == 0:\n print(\"No person found with name '%s' that we could remove.\" % person_name)\n return\n # the person exists, so remove it\n query_no_results(\"delete from person where name = ?\", [person_name])\n # remove all associations with tasks\n query_no_results(\"delete from task_person_pair where person = ?\", [results[0][0]])\n print(\"Person with name '%s' removed.\" % person_name)", "def remove_patient_monitor(self, patient_name):\n self._monitored_patients.remove_patient(patient_name)", "def deleteInstrumentFromName(self, name):\n matching_instruments = list(filter(lambda x: x.name == name,\n self.instruments))\n assert len(matching_instruments) == 1\n del self.instruments[name]", "def delete(self, name):\n\n pass", "def delete(id_patient: str):\n database = get_connection()\n col = database.patients\n query = {\"patient_data.id\": id_patient}\n col.delete_one(query)", "def remove(self, name):\n raise NotImplementedError", "def remove_person(self, per: str):\n if per in self._people:\n self._people.remove(per)\n else:\n raise IDDoesNotExist", "def removePatron(self, name):\n patron = self._patrons.pop(name, None)\n if patron == None:\n return \"Patron's name is not in the library\"\n elif patron.getNumBooksOut() > 0:\n for book in self._books.values():\n if patron == book.getPatron():\n book.returnMe()\n return None", "def delete(self, name):\n if name in self._dict:\n self._dict.pop(name)\n self.save()\n else:\n raise PoseError(\"%s is not in database\" % _name)", "def remove_by_person_id(self, p_id):\r\n self.__repo.remove_appointment_by_person_id(p_id)", "def remove_record():\n # could use .../record/<name> in URL or as in this case as an argument .../record?name=bob\n if 'name' not in request.args:\n return \"need a name to delete a record!\", 400\n with RECORD_LOCK:\n if len([r for r in RECORDS if r.get('name') == request.args.get('name')]) == 0:\n return \"no such record found!\", 409\n RECORDS[:] = [r for r in RECORDS if r.get( # copy all but name matches\n 'name') != request.args.get('name')]\n return \"OK\"", "def delete(self, name=None):\n raise NotImplementedError", "def remove(self, name: str) -> None:\n del self.components[name]", "def delete(self, name):\n assert name, \"Must input a valid dataset name.\"\n self.manager.delete_data(name)", "def remove_dataset(self, name):\n payload = {\"name\": name}\n r = self.request(\n \"delete\", url_path_join(USER_DATASET_RESOURCE_URL, self.owner), payload=payload\n )\n self.check_and_raise(r)", "def remove_by_name(self, values, name):\n for d in values:\n if d[\"name\"] == name:\n values.remove(d)\n return", "def remove_person(self, document):\n del self.__people[document]", "def delete_pet(self, p_name):\r\n for pets in range(0, len(self.pet_file)):\r\n if self.pet_file[pets][\"pet name\"] == p_name:\r\n self.pet_file.pop(pets) # remove requested pet dict\r\n break # leave 'for loop' to prevent out of index error\r\n with open(self.pet_file_name, 'w') as outfile:\r\n json.dump(self.pet_file, outfile) # confirm changes in json file\r", "def remove(self, name):\n if self.circles.has_key(name):\n del self.circles[name]\n self.cursor.execute(\"\"\"DELETE FROM sensors_powersensor WHERE 
target=%s\"\"\", (name,))", "def remove_resource(self, name):\n self._NDL_API('removeresource', { 'vm': name, }, None)", "def drem(self, name):\n return self.rem(name)", "def rm(self, name: str) -> None:\n path = self.get_path(name)\n if os.path.exists(path):\n os.remove(path)", "def remove_dataset(cls, name):\n gxapi_cy.WrapSTORAGEPROJECT._remove_dataset(GXContext._get_tls_geo(), name.encode())", "def remove_constraint_by_name(self, name):\n indices = self._constraints_df.index[self._constraints_df['name'] == name].tolist()\n self._remove_constraint_by_indices(indices)\n self._update_flag = True", "def remove_sponsor(contest, name):\n sponsors = pcm.Sponsor.get_sponsors_for_biv_id(contest, False)\n found = False\n for sponsor in sponsors:\n if (sponsor.display_name == name):\n db.session.delete(pam.BivAccess.query.filter_by(\n source_biv_id=contest,\n target_biv_id=sponsor.biv_id).one())\n db.session.delete(sponsor)\n print('deleted sponsor {}'.format(name))\n found = True\n if not found:\n print('no sponsor found for name: {}'.format(name))", "def remove_mix(self, name: str) -> None:\n self.remove(name)", "def remove(self,s):\n \n p1, p2 = self.persons\n \n p1.remove_partner(p2,s)\n p2.remove_partner(p1,s)", "def removePlayer(df, name):\n if name in getPlayerList(df):\n df = df[df.name != name]\n return df" ]
[ "0.750902", "0.7405967", "0.6991705", "0.6893029", "0.6882131", "0.67190564", "0.6682343", "0.6679356", "0.6566834", "0.6520179", "0.6490527", "0.6459752", "0.6419736", "0.6412036", "0.6363453", "0.63517666", "0.6299661", "0.629465", "0.6292144", "0.62912965", "0.6287894", "0.6258819", "0.62098634", "0.6176546", "0.61692435", "0.6163428", "0.6153478", "0.61522484", "0.6152098", "0.6134824" ]
0.75734454
0
Getter method for the list of patients
def get_patient_list(self): return self._patient_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_patients(self):\n return", "def get_patient_list(self, client):\n self._patient_list = client.get_patient_list(self.id)", "def get(self):\n all_patients = model_patient.query.all()\n return jsonify(all_patients)", "def getPatients(self):\n if not self.patients.choices:\n db = DBStorage()\n options = []\n for patient in db.all_patients():\n options.append((patient.id, '{} {}'.format(\n patient.name, patient.last_name)))\n self.patients.choices = options\n self.patients.default = 1", "def patients(request):\n try:\n doctor = Doctor.objects.get(user=request.user)\n except Doctor.DoesNotExist:\n raise Http404(\"Doctor with current user instance not found!\")\n\n #filter patients from asymptomatics\n patient_objects = doctor.patients.all()\n patients = []\n\n for patient in patient_objects:\n if patient.asymptomatic == False:\n patients.append(patient)\n\n\n #now get the doctors patient instances\n patients = get_patients_list(patients)\n\n return JsonResponse({\n \"patients\": patients\n })", "def get_patient_names(self):\n\t# use pre-defined patient names\n\tif (self.data_names is not None):\n\t\tassert (os.path.isfile(self.data_names))\n\t\twith open(self.data_names) as f:\n\t\t\tcontent = f.readlines()\n\t\tpatient_names = [x.strip() for x in content]\n\t# use all the patient names in data_root\n\telse:\n\t\tpatient_names = os.listdir(self.data_root[0])\n\t\tpatient_names = [name for name in patient_names if 'brats' in name.lower()]\n\treturn patient_names", "def get_members(self):\n return sorted([x[\"patient\"] for x in self.pedigree])", "def get_members(self):\n return sorted([x[\"patient\"] for x in self.get_filtered_pedigree_with_samples()])", "def get_queryset(self):\n username = self.request.user.username\n patient = UniquePatient.objects.filter(patient__user__username=username)\n return patient", "def get_patient_data(self, client):\n for patient in self._monitored_patients.get_patient_list():\n # print(\"Requesting data for \" + patient.first_name+\" \"+patient.last_name+\"...\")\n patient.update_data(client.get_patient_data(patient.id))", "def see_all():\n database = get_connection()\n patients_in_db = []\n patient: dict = database.patients.find()\n for p in patient:\n pat = p[\"patient_data\"]\n patients_in_db.append(pat)\n print(patients_in_db)\n return patients_in_db", "def get_persons(self):\n return self.person_list.model().get_person_list()", "def get_patients_in_ic(self):\n\n query = \"SELECT * FROM patients WHERE datetime_discharge IS NULL\"\n\n return self.mysql_obj.fetch_rows(query)", "def get_patient_dict():\r\n return common.get_dict_all(get_patient_filename(), None)", "def getList(self):\n\treturn self.list", "def getList(self):\n return self.list", "def get_patient_cases(patient):\n # ----- Get database connection\n db = connect_to_db()\n try:\n c1 = db.cursor()\n try:\n c1.execute(\n \"\"\"SELECT tc.SLABEL \"\"\"\n \"\"\"FROM BOM.PATIENT pt \"\"\"\n \"\"\" INNER JOIN BOM.TCASE tc ON pt.SUID = tc.SPATIENTUID \"\"\"\n \"\"\"WHERE \"\"\"\n \"\"\" pt.SID = '%s' \"\"\" %\n patient)\n res = c1.fetchall()\n cases = []\n for re in res:\n cases.append(re[0])\n finally:\n c1.close()\n finally:\n db.close()\n return cases", "def persons(self):\r\n return persons.Persons(self)", "def getList(self):\n return self.list_", "def get(self, offset, limit):\n try:\n patients_list = model_patient.query.order_by(\n model_patient.id.asc()\n ).paginate(offset, per_page=limit)\n\n list_item =[]\n for row in patients_list.items:\n list_item.append(row.serialize())\n\n except 
SQLAlchemyError:\n flash(\"No data in database\")\n patients_list = None\n\n return jsonify({'offset': offset, 'limit': limit, 'search_result': list_item})", "def Obtener_Lista(self):\n\t\treturn [self,self.Nombre+\" \"+self.Apellido,self.ID,self.Fecha, \n\t\tself.Edad,self.Test,self.Posicion,self.Prioridad,self.Progreso,self.Informe]", "def get(self):\n with open_session() as session:\n try:\n records = session.query(Patient).all()\n except NoResultFound:\n logger.info(\"No record found\") # TODO: remove debugging\n return gen_response(\"No result found\")\n except Exception as error:\n logger.exception(\"Exeption: %s\" % (str(error)))\n return gen_response(\"Internal server error\")\n\n # Build the response list\n rlist = [to_dict(record) for record in records]\n return gen_response(rlist)", "def getList(self):", "def getList(self):", "def results(self):\n q = self.cleaned_data['q'].strip()\n patients = PatientInformation.objects.filter(Q(operator__username__contains=q) | \\\n Q(patient_id__contains=q) | Q(first_name__contains=q) | Q(last_name__contains=q) | \\\n Q(email__contains=q)).distinct()\n return patients", "def getList(self):\n pass", "def ajax_patients_list(request, doctor_id):\n\n template = 'patients_list.html'\n if request.is_ajax():\n patients = Doctor.objects.get(pk=doctor_id).patients.all()\n return TemplateResponse(request, template, {'patients': patients})\n else:\n return HttpResponseRedirect(reverse('index'))", "def get_patient_fields(connection, patient_id):\n patient_id = str(patient_id)\n\n patient_field_results = pymedphys.mosaiq.execute(\n connection,\n \"\"\"\n SELECT\n TxField.FLD_ID,\n TxField.Field_Label,\n TxField.Field_Name,\n TxField.Version,\n TxField.Meterset,\n TxField.Type_Enum,\n Site.Site_Name\n FROM Ident, TxField, Site\n WHERE\n TxField.Pat_ID1 = Ident.Pat_ID1 AND\n TxField.SIT_Set_ID = Site.SIT_Set_ID AND\n Ident.IDA = %(patient_id)s\n \"\"\",\n {\"patient_id\": patient_id},\n )\n\n table = pd.DataFrame(\n data=patient_field_results,\n columns=[\n \"field_id\",\n \"field_label\",\n \"field_name\",\n \"field_version\",\n \"monitor_units\",\n \"field_type\",\n \"site\",\n ],\n )\n\n table.drop_duplicates(inplace=True)\n\n table[\"field_type\"] = [FIELD_TYPES[item] for item in table[\"field_type\"]]\n\n return table", "def get_queryset(self):\n username = self.request.user.username\n patient = UniquePatient.objects.get(patient__user__username=username)\n u_id = patient.u_Id\n _prescription = Prescription.objects.filter(u_id=u_id,is_checkout=False).order_by('-id')[:1]\n return _prescription", "def hospital_list(self, request, **dict):\n\t\tdata = self.get_serializer(self.get_queryset(), many=True).data\n\t\treturn Response(data, status.HTTP_200_OK)" ]
[ "0.83076537", "0.75237614", "0.7165529", "0.7019929", "0.63093877", "0.6288992", "0.62888664", "0.61578315", "0.6133857", "0.60420316", "0.59725815", "0.59300333", "0.59086293", "0.5895699", "0.5863118", "0.5838831", "0.5823298", "0.5802317", "0.5790686", "0.5783136", "0.5761313", "0.5748542", "0.5737039", "0.5737039", "0.5730293", "0.56776744", "0.56550103", "0.5640427", "0.5625369", "0.56160325" ]
0.86567974
0
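A minimal, self-contained sketch of the getter in the record above; the PatientRecordSystem name and the add_patient helper are hypothetical, added only so the getter has something to return.

class PatientRecordSystem:
    def __init__(self):
        self._patient_list = []

    def add_patient(self, patient):
        self._patient_list.append(patient)

    def get_patient_list(self):
        # The getter simply exposes the internal list of patients.
        return self._patient_list

system = PatientRecordSystem()
system.add_patient("Alice Smith")
print(system.get_patient_list())  # ['Alice Smith']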
Find patient with the given name
def select_patient(self, patient_name): for i in range(len(self)): selected_patient = self._patient_list[i] if patient_name == selected_patient.first_name + " " + selected_patient.last_name: return selected_patient return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(self, name):\n\n name = name.lower().strip()\n exact_names = get_close_matches(name, self.possible_names, n=1)\n if not exact_names:\n return None\n else:\n exact_name = exact_names[0]\n id = self.df_possible_names[self.df_possible_names['name'] == exact_name].index[0] \n return self.df_possible_names.loc[id, 'id']", "def search_for_name(self, name):\n for p in self.books_all:\n if p['name'] == name:\n return p", "def search_by_name(self, name):\r\n return self.__filter(self.get_all_persons(), lambda x: name.lower().strip() in x.name.lower().strip())", "def find(self, name):\n return Search(self.request).find(name)", "def find_person(name):\n if ' ' in name:\n name = name.replace(',', '')\n else:\n return None\n\n try:\n (first, last) = name.split(' ', 1)\n return Person.get(Person.first_name ** first, Person.last_name ** last)\n except Person.DoesNotExist:\n pass\n\n try:\n (last, first) = name.split(' ', 1)\n return Person.get(Person.first_name ** first, Person.last_name ** last)\n except Person.DoesNotExist:\n pass\n\n return None", "def search(self,name=None):\n\t\taddresses = discover_devices()\n\t\t#if len(addresses) == 0:\n\t\t#\treturn None\n\t\tnames = []\n\t\tfor adr in addresses:\n\t\t\tnames.append(lookup_name(adr))\n\t\t\tif name != None and name == names[-1]:\n\t\t\t\treturn adr\n\n\t\treturn zip(addresses,names)", "def by_name(name):\n devices = discover()\n\n for device in devices or []:\n if device.player_name == name:\n return device\n return None", "def searchByName(database):\n firstname=str(input(\"What is his first name :\"))\n usr,find=getByName(database,firstname)\n if find:\n print(usr)", "def find_by_name(name):\n return repository.find_by_name(name)", "def find_by_name ( self, name, **kw ):\n try:\n return next ( self.find_all_by_name ( name, **kw ) )\n except StopIteration:\n return None", "def find_by_name(command, name): # fine\r\n if command == 'FindByFName':\r\n for student in StudentRoster:\r\n if name == student.first:\r\n print(student_format(student))\r\n elif command == 'FindByLName':\r\n for student in StudentRoster:\r\n if name == student.last:\r\n print(student_format(student))", "def find_by_name(self, name):\n return self.get(name)", "async def find_device_by_name(\n cls, name: str, timeout: float = 10.0, **kwargs\n ) -> Optional[BLEDevice]:\n return await cls.find_device_by_filter(\n lambda d, ad: ad.local_name == name,\n timeout=timeout,\n **kwargs,\n )", "def find(cls, device_name):\n return cls.query(cls.device_name == device_name).fetch(1)", "def __ui_search_persons_by_name(self):\n searched_name = input(\"Introduce the name: \").strip().lower()\n if searched_name == \"\":\n print(\"You cannot search persons by an empty name!\\n\")\n return\n\n searched_persons = self.__person_service.find_persons_by_name(searched_name)\n\n if len(searched_persons) == 0:\n print('There is no person whose name contains \"{}\"!\\n'.format(searched_name))\n else:\n print(\"\")\n for person in searched_persons:\n print(person)\n print(\"\")", "def find_by_name(cls, name: str):\n cls.logger.info(\"Processing name query for %s ...\", name)\n return cls.query.filter(cls.name == name).order_by(cls.id).all()", "def search_player_by_name(players_table, name):\r\n result = players_table.search(Query().Nom == name)\r\n print(result)", "def search_food(cls, name):\n obj = cls.objects(name=name).first()\n return obj", "def find(self, name):\n congressman_list = self.deputies + self.senators\n for congressman in congressman_list:\n if congressman.name == 
name:\n return congressman\n return None", "def get_patient(self, id_examen):\n\n self.logger.info(\"\\t[+] get_patient [+]\")\n self.logger.info(f\"\\t[+] id_examen {id_examen} [+]\")\n\n try:\n # Select the row and retrieve the id\n id_patient = list(self.examens.select().where(self.examens.columns.id_examen == id_examen).execute())[0][1]\n\n if(id_patient):\n return self.patients.select().where(self.patients.columns.id_patient == id_patient).execute()\n else:\n self.logger.warning(f\"\\t [-] Patient not found {id_patient} [-]\")\n return False\n\n except Exception as e:\n self.logger.critical(\"\\t[-] Exception occured [-]\")\n self.logger.critical(\"\\t\" + str(e))\n self.logger.critical(\"\\t[-] Exception occured [-]\")", "def get_employee_by_name(self, name):\n self.lock.acquire()\n for employee in self.__Session.query(Employee).all():\n if (employee.fname+' '+employee.lname == name):\n result = employee\n self.lock.release()\n return result", "def find_by_name(self, name: str, owner: Optional[str]) -> Optional[DatasetDB]:\n dataset = DatasetDB(name=name, owner=owner, task=TaskType.text_classification)\n document = self._es.get_document_by_id(\n index=DATASETS_INDEX_NAME, doc_id=dataset.id\n )\n if not document:\n # We must search by name since we have no owner\n results = self._es.list_documents(\n index=DATASETS_INDEX_NAME,\n query={\"query\": {\"term\": {\"name.keyword\": name}}},\n )\n results = list(results)\n if len(results) == 0:\n return None\n\n if len(results) > 1:\n raise ValueError(\n f\"Ambiguous dataset info found for name {name}. Please provide a valid owner\"\n )\n\n document = results[0]\n return self._es_doc_to_dataset(document) if document else None", "def find_donor(name):\n for donor in OO_mailroom.Donor.donor_db:\n # do a case-insensitive compare\n if name.strip().lower() == donor[0].lower():\n return donor\n\n return None", "def by_name(cls, name):\n return cls.all().filter('name =', name).get()", "def __extract_patient_name (self, r=None):\n\t\tif self._file :\n\t\t\t#r = re.search(r'Name:\\s+(.+?)(?=Visit|MRN|\\d+)', self._file.text, re.I)\n\t\t\tr = re.search(r'Name:\\s+(.+?)(?=\\n)', self._file.text, re.I)\n\t\t\tassert r, \"Patient Name could not be derived from OCR text!\"\n\t\t\tr = r.groups()[0]\n\t\treturn r or None", "def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).first()", "def query_by_name(name):\n\tstudent = session.query(Student).filter_by(\n\t\tname=name).first()\n\treturn student", "def test_05_get_person_by_name(self):\n p1 = Person.query.first()\n p1_data = p1.wrap()\n p1_f_name = p1_data[\"first_name\"]\n # find by first name only\n # get part of name and search\n q_string = \"?first_name={}\".format(p1_f_name[:3]) # TODO - verify the length\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and last name\n p1_l_name = p1_data[\"last_name\"]\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], p1_l_name)\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and non-existing last name\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], \"iAmNotThere\")\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 0)", "def query_by_person(self, name: str) -> dict:\n if not self.client:\n self.connect()\n return 
self.client.moviebuff.castcrew.find_one({'Name': name})", "def find_resource_by_name(self, resource_name, value):\n try:\n filters = {'name': value}\n obj = getattr(self.client(), resource_name)\n obj_list = obj.find(**filters)\n except sahara_base.APIException as ex:\n raise exception.Error(\n _(\"Error retrieving %(entity)s list from sahara: \"\n \"%(err)s\") % dict(entity=resource_name,\n err=six.text_type(ex)))\n num_matches = len(obj_list)\n if num_matches == 0:\n raise exception.EntityNotFound(entity=resource_name or 'entity',\n name=value)\n elif num_matches > 1:\n raise exception.PhysicalResourceNameAmbiguity(\n name=value)\n else:\n return obj_list[0].id" ]
[ "0.65410453", "0.64271235", "0.63665813", "0.633591", "0.6325891", "0.62507224", "0.62497336", "0.62015235", "0.61805177", "0.6152043", "0.6144992", "0.61369663", "0.61330307", "0.6123447", "0.61093545", "0.6081271", "0.608047", "0.607569", "0.599046", "0.59851915", "0.59836495", "0.5955878", "0.59545445", "0.5943453", "0.59397733", "0.59381557", "0.59222627", "0.5904696", "0.5904098", "0.58576304" ]
0.7499407
0
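A self-contained sketch of the name-based lookup in the record above, assuming a hypothetical Patient dataclass and a PatientList container; the original container class is not shown in the record, so these names and the __len__ method are illustrative only. The scan matches on the "first last" full name and returns None when nothing matches, mirroring the original.

from dataclasses import dataclass

@dataclass
class Patient:
    first_name: str
    last_name: str

class PatientList:
    def __init__(self, patients):
        self._patient_list = list(patients)

    def __len__(self):
        return len(self._patient_list)

    def select_patient(self, patient_name):
        # Linear scan over the stored patients, matching on the full name.
        for patient in self._patient_list:
            if patient_name == f"{patient.first_name} {patient.last_name}":
                return patient
        return None

plist = PatientList([Patient("Ada", "Lovelace"), Patient("Alan", "Turing")])
print(plist.select_patient("Alan Turing"))   # Patient(first_name='Alan', last_name='Turing')
print(plist.select_patient("Grace Hopper"))  # None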
Calculate average cholesterol of all patients If patient has no cholesterol data, they are ignored
def calculate_avg_cholesterol(self): total = 0 no_of_valid_patients = 0 for patient in self._patient_list: try: total += patient.get_cholesterol_data()[0] no_of_valid_patients += 1 except AttributeError: continue except TypeError: continue if no_of_valid_patients == 0: return 0 average = total/no_of_valid_patients self.average_cholesterol_level = average return average
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def averageDominationCount(leaf):\n averageDominationCount = np.nanmean(leaf.calDominationCount())\n return averageDominationCount", "def calcAverage(dat):\n return sum(dat)/len(dat)", "def batting_average(df,start_year,end_year,bat_met,player_name):\n\n base_fields = ['H','AB']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n return round(df['H'].sum(axis = 0) / df['AB'].sum(axis = 0),3)\n\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n return round(df['H'].sum(axis = 0) / df['AB'].sum(axis = 0),3)", "def average_concentration():\n x10 = 0 # HCl gas fraction on the droplet surface, [/]\n x30 = pwater / pre # water vapour fraction in the pipe, [/]\n x11 = phcl / pre # HCl gas fraction in the pipe, [/]\n x31 = 12.3e-3 # water vapour fraction on the droplet surface, [/]\n if x30 < 0.02 and x31 < 0.02:\n x30 = 0.0\n x31 = 0.0 # the fraction is pretty low, thus neglect the water part\n x20 = 1 - x10 - x30 # other gas fraction on the droplet surface, [/]\n x21 = 1 - x11 - x31 # other gas fraction in the pipe, [/]\n x1d = x10 - x11 # HCl fraction difference, [/]\n x1_bar = (x10 + x11) / 2 # HCl average fraction, [/]\n x2d = x20 - x21 # Natural gas fraction difference, [/]\n x2_bar = (x20 + x21) / 2 # natural gas average fraction, [/]\n x3d = x30 - x31 # water fraction difference, [/]\n x3_bar = (x30 + x31) / 2 # water average fraction, [/]\n return x1d, x1_bar, x2d, x2_bar, x3d, x3_bar", "def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average", "def get_mean(self):\n mean = np.array(np.zeros((4,8)))\n for i,c in enumerate(self.cellLines):\n for j,l in enumerate(self.ligands):\n mean[i][j] = self.aucs[c][l]['mean']\n return mean", "def average_age():\n df = pd.read_csv(config.META_FQN, sep=\"\\t\")\n ages = []\n for _, row in df.iterrows():\n if row[\"asr_test\"]:\n age = row[\"Age_ses1\"]\n if not math.isnan(age):\n ages.append(age)\n\n print(\"------ Age ------\")\n print_stats(ages)", "def average(self):\n return self.summation() / self.count()", "def avg_after_harry():\n copy = movies.copy()\n copy = copy.sort_values(['Year']).reset_index(drop = True) #years early to present\n harry_years = copy[copy['#1 Movie'].str.contains('Harry')].Year #years where harry potter was #1\n next_years = harry_years + 1\n check = list(next_years.values)\n next_years_df = copy[copy['Year'].isin(check)]\n avg = next_years_df['Number of Movies'].mean()\n if avg is np.nan:\n raise\n return ('avg_after_harry', avg)", "def getAverage(die, numRolls, numTrials):", "def get_mean(self):\n try:\n return sum(self.speakers.values()) / len(self.speakers)\n except (ZeroDivisionError):\n return 0.0", "def get_roaming_distance_mean_per_weekday(cab_data):\n\n try:\n roaming_distance_mean_per_weekday = (\n cab_data\n .pipe(_select_columns, ['occupancy', 'time', 'cab_id', 'distance'])\n .pipe(_select_vacant)\n .pipe(_add_day)\n .pipe(_add_weekday)\n .pipe(_get_total_roaming_distance_per_day_and_cab)\n # .pipe(_remove_outliers, contamination=0.01)\n .pipe(_get_roaming_distance_per_weekday_metrics)\n )\n except Exception as error:\n error_catching(error)\n\n return roaming_distance_mean_per_weekday", "def _get_mean(self):\n return self._get_conditional_negative_energy()", "def avg_num_visits_patient(self):\n pass", "def average_width(orthotope):\n return np.mean(orthotope[1] - orthotope[0])", "def average_gift(self):\n try:\n return self.total_donations / 
self.number_of_donations\n except ZeroDivisionError:\n return 0.0", "def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))", "def mean_height(data):", "def mean_height(data):", "def calccalmean(self,blk):\n calind=self.getcalind(blk)\n x=self.spec[calind,:]\n return np.nanmean(x,axis=0)", "def avg(year):\r\n df = ouvrir_fichier()\r\n df = df.loc[df[\"year\"].isin([year])]\r\n df = df[(\r\n df[\r\n \"emissions\"\r\n ] == 'Emissions (thousand metric tons of carbon dioxide)'\r\n )]\r\n print(df)\r\n mean_value = df.mean()['value']\r\n resultat = {}\r\n resultat[\"year\"] = year\r\n resultat['total'] = float(mean_value)\r\n print(mean_value)\r\n return resultat", "def getCMean(inp):\n\tinp = sorted(inp, key = lambda x: x[0])\n\treturn 1 - getClearWaterDepth(inp) / getY90(inp)", "def average(array):\n unique_vals = set(array)\n return sum(unique_vals) / len(unique_vals)\n\n \n # your code goes here", "def harmonic_mean(self):\n return self.count() / sum(1/number for number in self.numbers)", "def mean_calc(data, col):\n\tm = sum([row[col] for row in data]) / len(data)\n\treturn m", "def har_mean(array):\n return ((sum([1/x for x in array]))**(-1))*len(array)", "def get_average(self, s_freq, e_freq):\n s_ind = self.get_bin(s_freq)\n e_ind = self.get_bin(e_freq)\n lst = self.mags[s_ind:e_ind+1]\n try:\n avg = sum(lst)/len(lst)\n except:\n print(s_ind, e_ind)\n print('werid stuff')\n avg = 0\n return avg", "def process_data_p1(data):\r\n return data[[\"CONTROL\", \"Academic Year\", \"MD_EARN_WNE_P10\"]] \\\r\n .groupby([\"CONTROL\", \"Academic Year\"], as_index=False).mean()", "def donation_avg(donor_list, donor):\n return sum(donor_list[donor]) // len(donor_list[donor])", "def get_average(data):\n average = sum(data) / len(data)\n\n return average" ]
[ "0.6298497", "0.61703765", "0.603771", "0.59793043", "0.5961882", "0.5876234", "0.58576447", "0.58462703", "0.58457214", "0.58445007", "0.58436364", "0.58422744", "0.5831334", "0.57236063", "0.57181925", "0.56984377", "0.5671662", "0.5671279", "0.5671279", "0.56465757", "0.56365764", "0.56356335", "0.5634909", "0.5628098", "0.56125027", "0.56045985", "0.5600912", "0.55952805", "0.55896956", "0.55526805" ]
0.7487059
0
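A hedged sketch of the averaging pattern in the record above: patients without usable cholesterol data are skipped through the same AttributeError/TypeError guards, and an empty cohort yields 0. The Patient class here is hypothetical, and a (value, timestamp) tuple is assumed as the return type of get_cholesterol_data(), since the real data shape is not visible in the record.

class Patient:
    def __init__(self, cholesterol=None):
        self._cholesterol = cholesterol

    def get_cholesterol_data(self):
        # Assumed (value, timestamp) tuple, or None when no observation exists.
        return self._cholesterol

def average_cholesterol(patients):
    total, valid = 0.0, 0
    for p in patients:
        try:
            total += p.get_cholesterol_data()[0]
            valid += 1
        except (AttributeError, TypeError):
            continue  # no cholesterol data for this patient
    return total / valid if valid else 0

cohort = [Patient((5.5, "2020-01-01")), Patient(), Patient((6.5, "2020-02-01"))]
print(average_cholesterol(cohort))  # 6.0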
Helper to create a Variable stored on CPU memory.
def _variable_on_cpu(name, shape, initializer, trainable = True): with tf.device('/cpu:0'): var = tf.get_variable(name, shape, initializer=initializer, trainable = trainable) return var
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n var = tf.get_variable(name, shape, initializer=initializer, dtype=tf.float32)\n return var", "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n var = tf.get_variable(name, shape, initializer=initializer)\n return var", "def create_cpu():\n return CPU()", "def variable_on_cpu(name, shape, initializer):\n # Use the /cpu:0 device for scoped operations\n with tf.device('/cpu:0'):\n # Create or get apropos variable\n var = tfv1.get_variable(name=name, shape=shape, initializer=initializer)\n return var", "def to_var( x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)", "def variable_on_cpu(name, shape, initializer):\n # Use the /cpu:0 device for scoped operations\n with tf.device('/cpu:0'):\n # Create or get apropos variable\n var = tf.get_variable(name=name, shape=shape, initializer=initializer)\n return var", "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var", "def variable_on_cpu(name,\n shape,\n initializer,\n dtype=tf.float32,\n trainable=True):\n with tf.device('/cpu:0'):\n var = tf.get_variable(name, shape, initializer=initializer,\n dtype=dtype, trainable=trainable)\n return var", "def _variable_on_cpu(name, shape, initializer):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n \n return tf.get_variable(name, shape, initializer=initializer, dtype=dtype)", "def to_var(self, x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)", "def make_variable(self, name = None):\r\n return self.Variable(self, name = name)", "def make_variable(self, name=None):\r\n return self.Variable(self, name=name)", "def make_variable(self, name=None):\r\n return self.Variable(self, name=name)", "def _variable_on_cpu_with_constant(name, initializer):\n with tf.device('/cpu:0'):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, initializer=initializer, dtype=dtype)\n return var", "def load_shared_variable(val):\r\n return tensor_constructor(val)", "def make_variable(tensor, volatile=False):\n if torch.cuda.is_available():\n tensor = tensor.cuda()\n with torch.no_grad():\n return Variable(tensor)", "def to_Variable(sequence):\n if is_cuda():\n tensor = torch.cuda.LongTensor(sequence)\n else:\n tensor = torch.LongTensor(sequence)\n \n return Variable(tensor)", "def make_var(x, dtype=np.float32, cuda=True, volatile=False, requires_grad=False):\n if type(x) != Variable:\n if isinstance(x, np.ndarray): \n x = torch.from_numpy(x.astype(dtype))\n x = Variable(x, volatile=volatile, requires_grad=requires_grad)\n return make_cuda(x) if cuda else x", "def var(*args, **kwargs):\n return Variable(*args, **kwargs)", "def create_variable(name, size, number_of_nodes, type, casadi_type = 
'SX'):\n SX_var = SX.sym('SX_'+name, size)\n opc_var = []\n\n ns = 0\n if type == \"STATE\":\n ns = number_of_nodes\n elif type == \"CONTROL\":\n ns = number_of_nodes-1\n elif type == \"FINAL_STATE\":\n ns = 1\n\n if casadi_type is 'MX':\n for i in range(ns):\n opc_var.append(MX.sym(name + str(i), SX_var.size1()))\n elif casadi_type is 'SX':\n for i in range(ns):\n opc_var.append(SX.sym(name + str(i), SX_var.size1()))\n else:\n raise Exception('casadi_type can be only SX or MX')\n\n return SX_var, opc_var", "def _create_or_match_memory_variable(var):\n global _MEMORY_VARIABLE_LIST\n var_list = [a[0] for a in _MEMORY_VARIABLE_LIST]\n if var in var_list:\n i = var_list.index(var)\n return _MEMORY_VARIABLE_LIST[i][1]\n else:\n memory_var = tk.StringVar()\n _MEMORY_VARIABLE_LIST.append([var,memory_var])\n return memory_var", "def to_variable(value, block=None, name=None):\n if isinstance(value, np.ndarray):\n assert framework.in_dygraph_mode(\n ), \"to_variable could only be called in dygraph mode\"\n\n if not block:\n block = framework.default_main_program().current_block()\n py_var = framework.Variable(\n block,\n type=core.VarDesc.VarType.LOD_TENSOR,\n name=name,\n shape=value.shape,\n dtype=value.dtype,\n stop_gradient=True)\n var = py_var._ivar.value()\n tensor = var.get_tensor()\n if value.dtype == np.float16:\n value = value.view(np.uint16)\n tensor.set(value, framework._current_expected_place())\n return py_var\n elif isinstance(value, framework.Variable):\n return value\n else:\n raise TypeError(\n \"to_variable only accepts 'ndarray' and 'Variable' as value's input\")", "def variable(value, dtype, name=None, broadcastable=None):\n return tf.Variable(value, dtype=dtype, name=name)", "def get_variable(x, volatile=False):\n tensor = torch.cuda.LongTensor(x) if CUDA else torch.LongTensor(x)\n return autograd.Variable(tensor, volatile=volatile)", "def get_variable(x):\n return x.cuda() #if use_cuda else x", "def Variable(name):\n placeholder_node = placeholder_op()\n placeholder_node.name = name\n return placeholder_node", "def variable_on_gpu(name, shape, initializer):\n # Use the /cpu:0 device for scoped operations\n with tf.device('/device:GPU:0'):\n # Create or get apropos variable\n var = tf.get_variable(name=name, shape=shape, initializer=initializer)\n return var" ]
[ "0.6677603", "0.6677603", "0.66557324", "0.6650669", "0.6561501", "0.6552209", "0.6532292", "0.6519638", "0.6515672", "0.6498396", "0.6447161", "0.63890946", "0.6377066", "0.63579035", "0.6342367", "0.6342367", "0.63155484", "0.63096213", "0.62526655", "0.6182907", "0.61236006", "0.6030544", "0.60177696", "0.60171545", "0.5951243", "0.5921852", "0.59193623", "0.58821416", "0.5873333", "0.58672804" ]
0.66882116
0
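The helper in the record above uses the TF1-style tf.get_variable API; a minimal TensorFlow 2.x sketch of the same idea, pinning a variable's storage to host memory with a device scope, might look as follows. The shape and the tf.zeros initializer are arbitrary examples, not taken from the original code.

import tensorflow as tf

def variable_on_cpu(name, shape, initializer=tf.zeros, trainable=True):
    # Keep the variable in host (CPU) memory even when the surrounding
    # computation is placed on an accelerator.
    with tf.device('/cpu:0'):
        return tf.Variable(initializer(shape), name=name, trainable=trainable)

weights = variable_on_cpu('weights', (64, 32))
print(weights.device)  # expected to report a CPU device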
add leaky relu layer
def _add_leaky_relu(hl_tensor, leaky_param): return tf.maximum(hl_tensor, tf.mul(leaky_param, hl_tensor))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_leaky_relu(g, op, block):\n\n alpha = op.attr(\"alpha\")\n x = g.get_node(op.input(\"X\")[0])\n out = _op.nn.leaky_relu(x, alpha=alpha)\n g.add_node(op.output(\"Out\")[0], out)", "def make_leaky(net: nn.Module, leak: float = 0.2) -> nn.Module:\n def do_it(m: nn.Module) -> nn.Module:\n if isinstance(m, nn.ReLU):\n return nn.LeakyReLU(leak, m.inplace)\n return m\n\n return edit_model(net, do_it)", "def init_leaky_relu(m, a=None):\n if not isinstance(m, torch.nn.Conv2d):\n return\n if a is None:\n a = nn.modules.activation.LeakyReLU().negative_slope\n nn.init.kaiming_uniform_(m.weight, a=a)", "def leaky_rel(x, alpha=0.1):\r\n return tf.nn.leaky_relu(x, alpha=alpha)", "def leaky_relu(self, x, name, leak=0.2):\r\n return tf.maximum(x, leak * x, name=name)", "def leaky_relu(features, alpha=0.2, name=None):\n with ops.name_scope(name, \"LeakyRelu\", [features, alpha]) as name:\n features = ops.convert_to_tensor(features, name=\"features\")\n if features.dtype.is_integer:\n features = math_ops.cast(features, dtypes.float32)\n if isinstance(alpha, np.ndarray):\n alpha = alpha.item()\n return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)", "def residual_block(layer, filters):\n shortcut = layer\n layer = Conv2D(filters=filters, kernel_size=(3, 3),\n strides=(1, 1), padding=\"same\")(layer)\n layer = LeakyReLU(alpha=LEAKY_RELU_ALPHA)(layer)\n layer = Conv2D(filters=filters, kernel_size=(3, 3),\n strides=(1, 1), padding=\"same\")(layer)\n\n layer = Add()([layer, shortcut])\n layer = LeakyReLU(alpha=LEAKY_RELU_ALPHA)(layer)\n return layer", "def lrelu(self):\n return self.add_layer(lrelu)", "def intermediate_layer(layer, filters, kernel_size):\n layer = Conv2D(filters=filters, kernel_size=kernel_size,\n strides=(2, 2), padding=\"same\")(layer)\n layer = LeakyReLU(alpha=LEAKY_RELU_ALPHA)(layer)\n return layer", "def model_with_leaky_relu():\n inputs = tf.keras.Input(shape=(8, 8, 3,))\n x = tf.keras.layers.Conv2D(8, (2, 2))(inputs)\n x = tf.nn.leaky_relu(x, alpha=.4)\n x = tf.keras.layers.Conv2D(4, (2, 2))(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"model_with_leaky_relu\")(x)\n return outputs", "def leaky_relu(input, negative_slope=0.01, inplace=False):\n return FunctionLib.apply(\n 'Relu', input.device, [input],\n outputs=[input if inplace else None], alpha=float(negative_slope))", "def lrelu(x, leak=1e-3, name=None):\n return tf.maximum(x, leak * x, name=name)", "def leaky_relu(x, alpha, name=None):\n import tensorflow as tf\n with tf.name_scope(name, 'leaky_relu_{}'.format(alpha)):\n return tf.nn.relu(x) - alpha * tf.nn.relu(-x)", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model._features_extractor._modules.items():\n for layer in module:\n if isinstance(layer, LeakyReLU):\n layer.register_backward_hook(relu_backward_hook_function)\n 
layer.register_forward_hook(relu_forward_hook_function)", "def _relu(layer):\n return tf.nn.relu(layer)", "def leaky_relu(x, slope=0.2):\n return LeakyReLU(slope)(x)", "def __init__(self, input_dim, output_dim, hidden_dim=100, num_layers=2,\r\n num_epochs=100, learning_rate=0.001, threshold=0.1):\r\n super(ReluNet, self).__init__()\r\n\r\n self.input_dim = input_dim\r\n self.output_dim = output_dim\r\n self.hidden_dim = hidden_dim\r\n self.num_layers = num_layers\r\n self.num_epochs = num_epochs\r\n self.threshold = threshold\r\n self.learning_rate = learning_rate\r\n\r\n\r\n \r\n self.layers = nn.ModuleList()\r\n self.layers.append(nn.Linear(input_dim, hidden_dim))\r\n self.layers.append(nn.ReLU())\r\n for i in range(num_layers-1):\r\n self.layers.append(nn.Linear(hidden_dim, hidden_dim))\r\n self.layers.append(nn.ReLU())\r\n # output layer\r\n self.layers.append(nn.Linear(hidden_dim, output_dim))", "def leaky_relu(x, alpha=0.01):\n return tf.maximum(x,alpha*x)", "def makeReLUMessage(layer_callback):\n layer_callback.type = 5\n layer_callback.act.type = 0", "def leaky_relu(x, alpha=0.01):\n return tf.maximum(x, tf.multiply(x, alpha))", "def relu(x, name):\n\n with tf.name_scope(name):\n outputs = tf.nn.relu(x)\n # Return layer's output\n return outputs", "def relu_forward_hook_function(module, ten_in, ten_out):\n self.forward_relu_outputs.append(ten_out)", "def relu_forward_hook_function(module, ten_in, ten_out):\n self.forward_relu_outputs.append(ten_out)", "def relu_forward_hook_function(module, ten_in, ten_out):\n self.forward_relu_outputs.append(ten_out)", "def relu_forward_hook_function(module, ten_in, ten_out):\n self.forward_relu_outputs.append(ten_out)", "def relu_forward_hook_function(module, ten_in, ten_out):\n self.forward_relu_outputs.append(ten_out)", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.features._modules.items():\n if isinstance(module, ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)", "def convert_leakyrelu(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n initializer = kwargs[\"initializer\"]\n\n act_type = attrs.get(\"act_type\", \"leaky\")\n alpha = float(attrs.get(\"slope\", 0.25))\n\n act_name = {\"elu\": \"Elu\", \"leaky\": \"LeakyRelu\", \"prelu\": \"PRelu\",\n \"selu\": \"Selu\"}\n\n reshape_val_name = 'reshape' + str(kwargs[\"idx\"])\n input_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]\n\n reshape_value = np.array([1, -1, 1, 1], dtype='int64')\n dims = np.shape(reshape_value)\n\n shape_node = onnx.helper.make_tensor_value_info(reshape_val_name, input_type, dims)\n initializer.append(\n onnx.helper.make_tensor(\n name=reshape_val_name,\n data_type=input_type,\n dims=dims,\n vals=reshape_value,\n raw=False,\n )\n )\n\n slope_op_name = 'slope' + str(kwargs[\"idx\"])\n\n 
lr_node = []\n if act_type == \"prelu\" or act_type == \"selu\":\n reshape_slope_node = onnx.helper.make_node(\n 'Reshape',\n inputs=[input_nodes[1], reshape_val_name],\n outputs=[slope_op_name],\n name=slope_op_name\n )\n\n node = onnx.helper.make_node(\n act_name[act_type],\n inputs=[input_nodes[0], slope_op_name],\n outputs=[name],\n name=name)\n\n lr_node.append(shape_node)\n lr_node.append(reshape_slope_node)\n lr_node.append(node)\n else:\n node = onnx.helper.make_node(\n act_name[act_type],\n inputs=input_nodes,\n outputs=[name],\n name=name,\n alpha=alpha)\n lr_node.append(node)\n return lr_node", "def run(self, x):\n \"*** YOUR CODE HERE ***\"\n layer1 = nn.ReLU(nn.AddBias(nn.Linear(x, self.layer1), self.bias1))\n layer2 = nn.ReLU(nn.AddBias(nn.Linear(layer1, self.layer2), self.bias2))\n layer3 = nn.AddBias(nn.Linear(layer2, self.layer3), self.bias3)\n return layer3", "def _create_leakyrelu(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 0.01)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha)" ]
[ "0.66270137", "0.65977484", "0.658509", "0.65391964", "0.6505456", "0.6469088", "0.643801", "0.643752", "0.6395083", "0.638309", "0.6296739", "0.6291487", "0.6265566", "0.6173588", "0.6151767", "0.6140381", "0.61133206", "0.60983187", "0.60966843", "0.5993868", "0.5982271", "0.5977898", "0.5977898", "0.5977898", "0.5977898", "0.5977898", "0.59577006", "0.5935999", "0.59166986", "0.5902135" ]
0.6641197
0
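The one-liner in the record above calls tf.mul, which was renamed tf.multiply in TensorFlow 1.0; a current sketch of the same max(x, alpha * x) formulation, shown next to the built-in tf.nn.leaky_relu, could look like this (alpha=0.2 is just an example slope).

import tensorflow as tf

def leaky_relu(x, alpha=0.2):
    # Pass positive values through unchanged, scale negatives by alpha.
    return tf.maximum(x, alpha * x)

x = tf.constant([-2.0, -0.5, 0.0, 3.0])
print(leaky_relu(x).numpy())                   # approx. [-0.4 -0.1  0.   3. ]
print(tf.nn.leaky_relu(x, alpha=0.2).numpy())  # built-in gives the same result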
3d average pool layer
def _avg_pool3(x, ksize, strides, name): pool = tf.nn.avg_pool3d(x, ksize = ksize, strides = strides, padding = 'VALID', name = name) return pool
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def avg_pool3d(input, kernel_size, stride=1, padding=0, ceil_mode=False):\n return _pool('AVG', utils._triple, **locals())", "def avg_pool3d(input, ksize, strides, padding, data_format=\"NDHWC\", name=None): # pylint: disable=redefined-builtin\n with ops.name_scope(name, \"AvgPool3D\", [input]) as name:\n if data_format is None:\n data_format = \"NDHWC\"\n channel_index = 1 if data_format.startswith(\"NC\") else 3\n\n ksize = _get_sequence(ksize, 3, channel_index, \"ksize\")\n strides = _get_sequence(strides, 3, channel_index, \"strides\")\n\n return gen_nn_ops.avg_pool3d(\n input,\n ksize=ksize,\n strides=strides,\n padding=padding,\n data_format=data_format,\n name=name)", "def avg_pool(x,\n k_h,\n k_w,\n s_h,\n s_w,\n name,\n padding=\"VALID\"):\n with tf.name_scope(name):\n outputs = tf.nn.avg_pool(x, [1, k_h, k_w, 1], [1, s_h, s_w, 1], padding)\n # Return layer's output\n return outputs", "def adaptive_avg_pool3d(input, output_size):\n args = utils._get_adaptive_pool_args(\n input.size()[-3:], utils._triple(output_size))\n return _pool('AVG', utils._triple, input, **args)", "def avg_pooling(self, filter_):\n return self.add_layer(avg_pooling, filter_)", "def spatial_avg(self, input_layer):\n return tf.reduce_mean(input_layer, [2, 3], name='spatial_avg')", "def _avgpool(prev_layer):\n return tf.nn.avg_pool(prev_layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')", "def mean_allcnnc():\n # TODO implement pre forward hook to adapt to arbitary image size for other data sets than cifar100\n return nn.Sequential(\n nn.AvgPool2d(kernel_size=(6, 6)),\n flatten()\n )", "def convert_pool3d(g, op, block):\n\n adaptive = op.attr(\"adaptive\")\n ceil_mode = op.attr(\"ceil_mode\")\n global_pooling = op.attr(\"global_pooling\")\n ksize = op.attr(\"ksize\")\n paddings = op.attr(\"paddings\")\n padding_algorithm = op.attr(\"padding_algorithm\")\n pooling_type = op.attr(\"pooling_type\")\n data_format = op.attr(\"data_format\")\n\n if global_pooling:\n adaptive = True\n ksize = [1, 1, 1]\n\n input_x = g.get_node(op.input(\"X\")[0])\n _, _, _, in_h, in_w = infer_shape(input_x)\n\n op_map = {\n \"avg\": \"avg_pool3d\",\n \"max\": \"max_pool3d\",\n }\n\n strides = op.attr(\"strides\")\n if isinstance(strides, int):\n strides = [strides, strides]\n if isinstance(ksize, int):\n ksize = [ksize, ksize, ksize]\n if isinstance(paddings, int):\n paddings = [paddings] * 3\n\n if padding_algorithm == \"VALID\":\n paddings = [0, 0, 0]\n elif padding_algorithm == \"SAME\":\n input_x = autopad(input_x, strides, ksize)\n paddings = [0, 0, 0]\n elif padding_algorithm == \"EXPLICIT\":\n if len(paddings) == 3:\n paddings = [\n paddings[0],\n paddings[1],\n paddings[2],\n paddings[0],\n paddings[1],\n paddings[2],\n ]\n elif len(paddings) == 6:\n paddings = [\n paddings[0],\n paddings[3],\n paddings[1],\n paddings[4],\n paddings[2],\n paddings[5],\n ]\n else:\n msg = 'Value {} in attribute \"padding\" of operator Pool3d is not \"valid.\"'\n raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm))\n\n # handle with special case\n # while kernel size less than input size\n # shrink kernel size to input size\n if (\n not isinstance(in_h, _op.Expr)\n and padding_algorithm == \"EXPLICIT\"\n and in_h + paddings[0] + paddings[2] < ksize[0]\n ):\n ksize[0] = in_h\n if (\n not isinstance(in_w, _op.Expr)\n and padding_algorithm == \"EXPLICIT\"\n and in_w + paddings[1] + paddings[3] < ksize[1]\n ):\n ksize[1] = in_w\n\n if not adaptive:\n if pooling_type == \"avg\":\n exclusive = op.attr(\"exclusive\")\n out 
= _op.nn.avg_pool3d(\n input_x,\n pool_size=ksize,\n strides=strides,\n padding=paddings,\n ceil_mode=ceil_mode,\n count_include_pad=not exclusive,\n layout=data_format,\n )\n else:\n out = getattr(_op.nn, op_map[pooling_type])(\n input_x, pool_size=ksize, strides=strides, padding=paddings, ceil_mode=ceil_mode\n )\n else:\n out = getattr(_op.nn, \"adaptive_\" + op_map[pooling_type])(\n input_x, output_size=ksize, layout=data_format\n )\n g.add_node(op.output(\"Out\")[0], out)", "def avg_pool_nd(dims, *args, **kwargs):\n if dims == 1:\n return nn.AvgPool1D(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2D(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3D(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")", "def avgpool(input, filter_h, filter_w, stride_h, stride_w, padding, name):\n with tf.name_scope(name):\n ret = tf.nn.avg_pool(input, ksize=[1, filter_h, filter_w, 1], strides=[1, stride_h, stride_w, 1],\n padding=padding)\n print(name, \" : \", str(ret.shape))\n return ret", "def average(img, size=3):\n \n size = int(size)\n kernel = np.ones((size,size)) / float(size**2)\n\n return ndi.convolve(img,kernel)", "def all_views_global_avg_pool(input_layer, network_type):\r\n if network_type == \"CC\":\r\n input_l_cc, input_r_cc = input_layer\r\n\r\n input_layer_shape = input_l_cc.get_shape()\r\n pooling_shape = [1, input_layer_shape[1], input_layer_shape[2], 1]\r\n\r\n output_l_cc = tf.nn.avg_pool(input_l_cc, ksize=pooling_shape, strides=pooling_shape, padding='SAME')\r\n output_r_cc = tf.nn.avg_pool(input_r_cc, ksize=pooling_shape, strides=pooling_shape, padding='SAME')\r\n\r\n output = (output_l_cc, output_r_cc)\r\n return output\r\n else:\r\n input_l_mlo, input_r_mlo = input_layer\r\n\r\n input_layer_shape = input_l_mlo.get_shape()\r\n pooling_shape = [1, input_layer_shape[1], input_layer_shape[2], 1]\r\n\r\n output_l_mlo = tf.nn.avg_pool(input_l_mlo, ksize=pooling_shape, strides=pooling_shape, padding='SAME')\r\n output_r_mlo = tf.nn.avg_pool(input_r_mlo, ksize=pooling_shape, strides=pooling_shape, padding='SAME')\r\n\r\n output = (output_l_mlo, output_r_mlo)\r\n\r\n return output", "def spatial_average_pooling(x):\n return np.squeeze(x).mean(axis=0).mean(axis=0)", "def get_average(self, samples=50):\n first = self.layers[0].load_image()\n res = np.zeros(first.shape, dtype=float)\n intervals = len(self.layers)/samples\n for l in self.layers[::int(intervals)]:\n img = l.load_image().astype(float)\n res += img\n l.image = None\n return samples**-1*res", "def average(layer_list):\n\n if len(layer_list) < 1:\n print(\"error: averaging zero layers!\")\n return\n\n if len(layer_list) == 1:\n return layer_list[0]\n\n if (\n layer_list[0].name != layer_list[1].name\n or layer_list[0].dimensions != layer_list[1].dimensions\n ):\n print(\"{} != {}\".format(layer_list[0].name, layer_list[1].name))\n print(\"or\")\n print(\"{} != {}\".format(layer_list[0].dimensions, layer_list[1].dimensions))\n print(\"error: averaging different layers!\")\n return\n\n out = LayerSparsity(self.name, self.dimensions, layer_list[0].vector_sizes)\n\n for key in layer_list[0].histograms:\n out.histograms[key] = np.mean(\n np.array([layer.histograms[key] for layer in layer_list]), axis=0\n ).tolist()\n\n return out", "def forward(self, x):\r\n x = x.permute(0, 2, 1)\r\n x = torch.nn.functional.avg_pool1d(x, self.kernel_size)\r\n return x.permute(0, 2, 1)", "def global_average_pooling_2d(x, use_cudnn=True):\n\n return F.average_pooling_2d(x, ksize=(x.shape[2], x.shape[3]), 
use_cudnn=use_cudnn)", "def _global_avg_pool(x):\n with tf.name_scope('global_avg_pool'):\n assert x.get_shape().ndims == 4\n return tf.reduce_mean(x, [1, 2])", "def global_avg_pooling(self, x: tf.Tensor) -> tf.Tensor:\n x = tf.reduce_mean(x, [1, 2], name='pool5', keepdims=True)\n x = slim.conv2d(x, self.num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='logits')\n return tf.squeeze(x, [1, 2], name='spatial_squeeze')", "def _get_mean(self):\n return [layer._get_mean() for layer in self.layers]", "def forward(self, x):\n x = x.permute(0, 2, 1)\n x = torch.nn.functional.avg_pool1d(x, self.kernel_size)\n return x.permute(0, 2, 1), None", "def avg_pool(\n inputs, window_shape, strides=None, padding=\"VALID\", count_include_pad=True\n):\n y = pool(inputs, 0.0, lax.add, window_shape, strides, padding)\n if count_include_pad:\n y = y / np.prod(window_shape)\n else:\n div_shape = inputs.shape[:-1] + (1,)\n if len(div_shape) - 2 == len(window_shape):\n div_shape = (1,) + div_shape[1:]\n y = y / pool(\n jnp.ones(div_shape), 0.0, lax.add, window_shape, strides, padding\n )\n return y", "def Global_Average_Pooling(x, stride=1):\n\n return global_avg_pool(x, name='Global_avg_pooling')\n # But maybe you need to install h5py and curses or not", "def c3d(self):\n model = Sequential()\n # 1st layer group\n model.add(Conv3D(64, 3, 3, 3, activation='relu',\n border_mode='same', name='conv1',\n subsample=(1, 1, 1),\n input_shape=self.input_shape))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),\n border_mode='valid', name='pool1'))\n # 2nd layer group\n model.add(Conv3D(128, 3, 3, 3, activation='relu',\n border_mode='same', name='conv2',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool2'))\n # 3rd layer group\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool3'))\n # 4th layer group\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool4'))\n\n # 5th layer group\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5b',\n subsample=(1, 1, 1)))\n model.add(ZeroPadding3D(padding=(0, 1, 1)))\n # model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n # border_mode='valid', name='pool5', dim_ordering=\"tf\"))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='same', name='pool5', dim_ordering=\"tf\"))\n model.add(Flatten())\n\n # FC layers group\n model.add(Dense(4096, activation='relu', name='fc6'))\n model.add(Dropout(0.5))\n model.add(Dense(4096, activation='relu', name='fc7'))\n model.add(Dropout(0.5))\n model.add(Dense(self.nb_classes, activation='softmax'))\n for layer in model.layers:\n print(layer.output_shape)\n return model", "def apool1(x, p):\n if p > 1:\n x = tf.expand_dims(x, 3) # N x M x F x 1\n x = tf.nn.avg_pool(x, ksize=[1, p, 1, 1], strides=[1, p, 1, 1], padding='SAME')\n 
return tf.squeeze(x, [3]) # N x M/p x F\n else:\n return x", "def get_3d_points(preds_3d):\n for i,p in enumerate(preds_3d):\n preds_3d[i] = preds_3d[i] - preds_3d[i].mean(0)*np.ones((16,1));\n return preds_3d;", "def Compute3d(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_Compute3d(self, *args)", "def image_roi_average(img3d,roi):\n AVGs=np.empty(len(img3d))\n for frame in range(len(img3d)):\n values=np.empty(len(roi['area']))\n for i,pair in enumerate(roi['area']):\n values[i]=img3d[frame,pair[1],pair[0]]\n AVGs[frame]=np.average(values)\n #print(\"WARNING: LOWPASSING\")\n #AVGs=lowpass(AVGs,10)\n return AVGs", "def aggregate(self, solns, **kwargs):\n\n averaged_solution = torch.zeros_like(self.latest_model)\n # averaged_solution = np.zeros(self.latest_model.shape)\n if self.simple_average:\n num = 0\n for num_sample, local_solution in solns:\n num += 1\n averaged_solution += local_solution\n averaged_solution /= num\n else:\n for num_sample, local_solution in solns:\n averaged_solution += num_sample * local_solution\n averaged_solution /= self.all_train_data_num\n\n # averaged_solution = from_numpy(averaged_solution, self.gpu)\n return averaged_solution.detach()" ]
[ "0.71829695", "0.6880422", "0.6654039", "0.66025543", "0.655478", "0.6549917", "0.6522013", "0.6423354", "0.6423186", "0.62474144", "0.62447995", "0.61971706", "0.61574334", "0.61561435", "0.61540276", "0.61267376", "0.61063224", "0.61060435", "0.6097661", "0.6022569", "0.5984992", "0.5977112", "0.59586173", "0.59564245", "0.5952097", "0.5911199", "0.5891547", "0.5884233", "0.58789283", "0.5861596" ]
0.7235534
0
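A short TensorFlow 2.x usage sketch of the 3-D average pooling call wrapped by the helper above: 2x2x2 windows with stride 2 and VALID padding over a random NDHWC volume; the tensor shape is an arbitrary example.

import tensorflow as tf

x = tf.random.normal((1, 8, 8, 8, 1))  # batch, depth, height, width, channels

pooled = tf.nn.avg_pool3d(x, ksize=[1, 2, 2, 2, 1],
                          strides=[1, 2, 2, 2, 1], padding='VALID')
print(pooled.shape)  # (1, 4, 4, 4, 1)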
Given a directory path to start, looks for filenames in the directory, and then each parent directory successively, until found. Returns tuple (candidates, path).
def find_candidates_in_parent_dirs(filenames, path): candidates = [filename for filename in filenames if os.path.exists(os.path.join(path, filename))] if not candidates: parent_dir = os.path.join(path, '..') if os.path.abspath(parent_dir) != os.path.abspath(path): return find_candidates_in_parent_dirs(filenames, parent_dir) return (candidates, path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_folder(startpath, folder_name, first_occurrence=False):\n candidates = []\n for root, dirs, files in os.walk(startpath):\n for d in dirs:\n if d == folder_name.strip('/'):\n if first_occurrence:\n candidates.append(os.path.abspath(root + '/' + d))\n return candidates\n candidates.append(os.path.abspath(root+'/'+d))\n return candidates", "def IteratePathParents(start_path):\n path = os.path.abspath(start_path)\n yield path\n while path.strip('/'):\n path = os.path.dirname(path)\n yield path", "def _find(self, path):\n try:\n self.log.debug(\"Searching for '%s'\" % path)\n os.stat(path)\n self.log.debug(\" found!\")\n return path\n except OSError:\n self.log.debug(\" not found!\")\n segs = path.split(os.path.sep)\n found_parent = os.path.sep\n assert segs[0] == '' # expect a leading /\n for i in range(1, len(segs)): # start after leading /\n try:\n parent = os.path.sep.join(segs[:i+1])\n self.log.debug(\" searching parent %d %s\" % (i, str(parent)))\n os.stat(parent)\n self.log.debug(\" found\")\n found_parent = parent\n except OSError:\n self.log.debug(\" NOT found\")\n break\n\n # does the found_parent dir contain a differently-cased version of the requested path?\n candidates = [f for f in os.listdir(found_parent) if f.lower() == segs[i].lower()]\n self.log.debug(' Candidates: %s' % str(candidates))\n if candidates:\n if len(candidates) > 1:\n self.log.warn('Case ambiguity: %s%s{%s}' % (found_parent, os.path.sep, ','.join(candidates)))\n segs[i] = candidates[0]\n path = os.path.sep.join(segs)\n if i < (len(segs)-1):\n self.log.debug('recursing')\n path = self._find(path) # recursively search with the new case-corrected path segment\n\n self.log.debug('resolved to [or failed with] path: %s' % path)\n\n # returns path unmodified if we were unable to find case-corrected candidates.\n # expects underlying command implementations to handle file-not-found correctly if so.\n return path", "def _search_parent_dir(file_name):\n\n current_dir = os.getcwd()\n parent_dir = os.path.dirname(current_dir)\n while current_dir != parent_dir:\n if not os.path.splitdrive(current_dir)[-1]:\n return False\n file_list = os.listdir(current_dir)\n parent_dir = os.path.dirname(current_dir)\n\n if file_name in file_list:\n return current_dir\n\n else:\n current_dir = parent_dir\n return False", "def _find_files(directory: str, pattern: str) -> Iterator[str]:\n for root, dirs, files in os.walk(directory, topdown=True):\n dirs[:] = [d for d in dirs if _is_file_valid(d)]\n for basename in sorted(files):\n if _is_file_valid(basename) and fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n yield filename", "def traverse(self, path):\n path_list = [s for s in path.split('/') if len(s) > 0 ]\n directory = self.files\n index = 0\n while index < len(path_list) and path_list[index] in directory:\n if type(directory[path_list[index]]) is str: # directory is a file\n break\n directory = directory[path_list[index]]\n index += 1\n return directory, path_list[index:]", "def getFilePaths(directory):\r\n\tfor folder, subs, files in os.walk(directory):\r\n\t\tfor filename in files:\r\n\t\t\tyield os.path.join(folder, filename)", "def walk_parents(path):\n while os.path.splitdrive(path)[1] != os.sep:\n yield path\n path = os.path.dirname(path)", "def getImmediateSubdirectories(dir):", "def search_files(filename, search_path, pathsep=os.pathsep):\n clidFiles = []\n for path in search_path.split(pathsep):\n candidate = os.path.join(path, filename)\n if os.path.exists(candidate): 
clidFiles.append(os.path.abspath(candidate))\n return clidFiles", "def find_files(root, directory, filename):\n\n path_list = []\n walker = os.walk(root, followlinks=True)\n for root, dirs, files in walker:\n remove_vcs_dirs(dirs)\n\n #if dirs containt 'directory', don't walk others\n if directory in dirs: dirs[:] = [directory]\n\n if root.endswith(os.path.sep + directory):\n if filename in files:\n path_list.append(os.path.join(root, filename))\n dirs[:] = []\n\n return path_list", "def _walk_to_root(path):\n if not os.path.exists(path):\n raise IOError('Starting path not found')\n\n if os.path.isfile(path):\n path = os.path.dirname(path)\n\n last_dir = None\n current_dir = os.path.abspath(path)\n while last_dir != current_dir:\n yield current_dir\n parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))\n last_dir, current_dir = current_dir, parent_dir", "def findFiles(target, path):\r\n\tfiles = []\r\n\tlyst = os.listdir(path)\r\n\tfor element in lyst:\r\n\t\tif os.path.isfile(element):\r\n\t\t\tif target in element:\r\n\t\t\t\tfiles.append(path + os.sep + element)\r\n\t\telse:\r\n\t\t\tos.chdir(element)\r\n\t\t\tfiles.extend(findFiles(target, os.getcwd()))\r\n\t\t\tos.chdir(\"..\")\r\n\treturn files", "def _recursive_file_search(self, path, pattern):\n matches = []\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, pattern):\n matches.append(os.path.join(root, filename))\n\n return matches", "def search_parents(name, cwd):\n for pdir in parents(cwd):\n if name in os.listdir(pdir):\n return os.path.join(pdir, name)\n\n return None", "def get_files_in_dir(directory, name, depth):\n return [join(directory, f) for f in listdir(directory) if isfile(join(\n directory, f)) and f.startswith(name) and get_depth(f) == depth]", "def _complete_path(path=None):\r\n if not path:\r\n return _listdir('.')\r\n dirname, rest = os.path.split(path)\r\n tmp = dirname if dirname else '.'\r\n res = [p for p in _listdir(tmp) if p.startswith(rest)]\r\n # more than one match, or single match which does not exist (typo)\r\n if len(res) > 1 or not os.path.exists(path):\r\n return res\r\n # resolved to a single directory, so return list of files below it\r\n if os.path.isdir(path):\r\n return [p for p in _listdir(path)]\r\n # exact file match terminates this completion\r\n return [path + ' ']", "def scan_tree(path):\n list_of_file_paths = []\n for file_obj in scandir(path):\n if file_obj.is_dir(follow_symlinks=False):\n # yield from scan_tree(file_obj.path)\n list_of_file_paths.extend(scan_tree(file_obj.path))\n else:\n # yield file_path\n if 'DS_Store' not in file_obj.path:\n list_of_file_paths.append(file_obj.path)\n return list_of_file_paths", "def get_files_from_root(self, candidate_filenames):\n\n def find(candidate_filenames, path):\n found_files = {}\n for root, dirs, files in os.walk(path):\n for name in files:\n for poss_name in candidate_filenames:\n if fnmatch.fnmatch(name, poss_name):\n found_files[name] = os.path.join(root, name)\n return found_files\n\n return find(candidate_filenames, self.temp_location)", "def determine_subdirectories(file_path):\n\tsource_dir = file_path.replace('/data/','/data-cg1d/')\n\tlead_dir_1, subdir_1 = split_leading_directory(source_dir)\n\tlead_dir_2, subdir_2 = split_leading_directory(subdir_1)\n\tipts_dir, new_subdir = split_leading_directory(subdir_2)\n\tprint('\\n\\nsource_dir: {}\\nlead_dir_2: {}\\nsubdir_2: {}\\nipts_dir: {}\\n new_subdir: {}\\n\\n'.format(\n\t\tsource_dir, lead_dir_2, subdir_2, ipts_dir, 
new_subdir))\n\treturn source_dir, ipts_dir, new_subdir", "def find_files_in_dirs(directory_list, filename):\n path_list = []\n\n for directory in directory_list :\n path_list += find_files(\n directory, \n constants.PRJ_OPT_DIR, \n filename\n )\n\n return path_list", "def recursive_search(path, target_files):\n for root, _dirs, files in os.walk(path):\n for filename in files:\n if filename in target_files:\n return os.path.join(root, filename)", "def _recursivelyFindFile(self, topLevelDirectory, filename):\n print ('finding ' + filename + '...\\n')\n tempSubDirs = {} #initialize temporary dictionary of sbudirectories\n \n for dirpath, dirnames, filenames in os.walk(topLevelDirectory):\n #print '---dirpath---'\n #print dirpath\n #print '---dirnames---'\n #print dirnames\n #print '---filenames---'\n #print filenames\n #print '------'\n for f in filenames:\n #check filenames for a match\n if f == filename:\n tempSubDirs[dirpath] = True\n\n #in Python 3 dict.keys(), dict.values() and dict.items() will all return iterable views instead of lists \n if sys.version_info >= (3, 0):\n return list(tempSubDirs.keys())\n \n return tempSubDirs.keys()", "def find_files(directory, pattern):\n try:\n for root, dirs, files in os.walk(directory, followlinks=True):\n for basename in files:\n if fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n yield filename\n except Exception as e:\n sys.stderr.write(e.message)\n exit(1)", "def find_files(d):\n for root, dirs, files in os.walk(d):\n for f in files:\n yield path.abspath(path.join(root, f))", "def all_files_under(path):\r\n for cur_path, dirnames, filenames in os.walk(path):\r\n for filename in filenames:\r\n yield os.path.join(cur_path, filename)", "def files_in_dir(root_dir):\n file_set = set()\n\n for dir_, _, files in os.walk(root_dir):\n for file_name in files:\n rel_dir = os.path.relpath(dir_, root_dir)\n rel_file = os.path.join(rel_dir, file_name)\n file_set.add(rel_file)\n\n return [Path(PureWindowsPath(f)) for f in file_set]", "def gather_candidates(path, blacklist=BLACKLIST): # pylint: disable=W0102\n candidates = set()\n if not os.path.isdir(path):\n return candidates\n\n blacklist_re = multiglob_compile(blacklist, prefix=True)\n for fname in os.listdir(path):\n fpath = os.path.join(path, fname)\n\n # Skip hidden files and directories\n if fname.startswith('.'):\n log.debug(\"Skipped hidden file/folder: %s\", fpath)\n continue\n\n # Skip blacklisted paths\n if blacklist_re.match(fpath):\n log.debug(\"Skipped blacklisted path: %s\", fpath)\n continue\n\n # Directories get a free pass to stage two\n if os.path.isdir(fpath):\n log.debug(\"Directories are automatically accepted: %s\", fpath)\n candidates.add(fpath)\n continue\n\n # Skip non-executable files that need +x to be potential games\n if not os.access(fpath, os.X_OK):\n if not os.path.splitext(fpath)[1].lower() in EXEC_EXCEPTIONS:\n continue\n\n candidates.add(fpath)\n return candidates", "def get_parent_paths(path, stop_at):\n assert stop_at in path\n if stop_at not in path:\n return []\n if path == stop_at:\n return [path]\n\n path = os.path.expanduser(path)\n\n ps = [os.path.abspath(path)]\n while True:\n path = os.path.abspath(os.path.join(path, os.pardir))\n ps.append(path)\n if path == stop_at:\n break\n\n return ps", "def get_path_to_rel_location(directory_to_find):\n path = Path.cwd()\n num_tries = 5\n for num_up_folder in range(num_tries):\n path = path.parent\n if path / directory_to_find in path.iterdir():\n break\n\n if num_tries == num_up_folder:\n 
raise FileNotFoundError(f\"The directory {directory_to_find} could not be found in the {num_tries}\"\n f\" directories above this file's location.\")\n return path / directory_to_find" ]
[ "0.6888166", "0.6649314", "0.654585", "0.65055346", "0.63376766", "0.6318664", "0.626144", "0.62060845", "0.6189229", "0.6188247", "0.6138768", "0.61079556", "0.60975707", "0.6085025", "0.60789895", "0.60708904", "0.6052941", "0.60421807", "0.60420406", "0.6041359", "0.6025379", "0.60148364", "0.5994873", "0.5978255", "0.5976968", "0.59393805", "0.59294593", "0.5926064", "0.5924731", "0.58968806" ]
0.77151185
0
Removes any words containing digits like flight numbers, phone numbers, etc.
def remove_flight_numbers(text): return ' '.join(word for word in text.split() if not any(char.isdigit() for char in word))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_words_digits(text):\n return \" \".join([word for word in str(text).split() if not any(c.isdigit() for c in word) ])", "def stripword( s ) :\n return re.sub( '[\\W\\d]', '', s )", "def removeNumbers(self, words):\n\t\treturn re.sub(r'\\d', '', words)", "def sanitize_text(text):\n return re.sub(r\"\\d+\", \"\", text)", "def remove_numbers(text):\n return ''.join([i for i in text if not i.isdigit()])", "def remove_numbers(text):\n return re.sub(r'\\d+', '',text)", "def remove_digits(text):\n return re.sub(r'[\\d]', '', text)", "def _remove_digits(self, text: str) -> str:\n return re.sub(r\"\\d+\", \" \", str(text))", "def clean_word(word: str) -> str:\n\n cleaned_word = ''\n for char in word.lower():\n if char.isalnum():\n cleaned_word = cleaned_word + char\n return cleaned_word", "def cleanup(text):\n\n\tRE_D = re.compile('\\d')\n\n\ttokens = text.split()\n\tnew_tokens = list()\n\tfor t in tokens:\n\t\tif RE_D.search(t):\n\t\t\tcontinue\n\t\tfor p in string.punctuation:\n\t\t\tif p == \".\":\n\t\t\t\tcontinue\n\t\t\tt=t.replace(p,\"\")\n\t\tnew_tokens.append(t.lower().strip())\n\n\treturn \" \".join(new_tokens)", "def remove_free_digits(text):\n return RegexFilters.replace_free_digits(text, \" \")", "def remove_digits(self, text):\n return re.sub('\\d+', '', text)", "def remove_numbers(text):\n result = re.sub(r'\\d+', '', text)\n return result", "def clean_numbers(text):\n return regex.sub(\"\\d+\", ' NUM', text)", "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)", "def clean_sentence(sentence):\n words = sentence.lower().split()\n clean_sent = \"\"\n for word in words:\n clean_sent += (''.join(list(map(lambda x: x if x in ascii_lowercase or x in \"1234567890\" else '', list(word))))) + \" \"\n return clean_sent[:-1]", "def tweet_clean_numbers(word):\n if not re.search(r'[0-9]+', word):\n return word\n if len(word)==4 and re.search(r'[0-9]{4}', word) and 1900 < int(word) < 2019:\n return word\n word = re.sub(r'^([0-9]|[\\+\\-%/\\*\\.:])+[0-9%/\\+\\*\\.x:]*$', '<number>', word)\n return word", "def clean_word( s ):\n words = ''\n for i in s:\n if i.isalpha():\n words += i\n else:\n words = words\n return words", "def cleaning(string):\n\n if type(string) == float or type(string) == int:\n return string\n res = ''\n if string != string:\n return string\n string = string.replace(\"\\\\r\", \"\")\n string = string.replace(\"\\\\n\", \"\")\n string = string.replace(\"\\\\b\", \"\")\n string = string.replace(\"\\\\t\", \"\")\n for i in string:\n if i.isalpha():\n res = res + i\n return res.lower()", "def strip_non_num(phone):\n return ''.join([i for i in phone if i.isdigit()])", "def basic_cleaning2(string):\n\n string = string.lower()\n string = re.sub('[0-9\\(\\)\\!\\^\\%\\$\\'\\\"\\.;,-\\?\\{\\}\\[\\]\\\\/]', ' ', string)\n string = re.sub(' +', ' ', string)\n return string", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def cleanWord(word):\r\n newWord = [letter.lower() for letter in word if letter.isalpha()]\r\n return \"\".join(newWord)", "def _cleanse(text):\n return ''.join([character for character in text\n if character.isalnum()]).lower()", "def clean(word):\n word = word.lower()\n stopwords = ['of', 'and','to', 'at', 'in', '@']\n word = re.sub(r'[\\&/\\-\\(\\)\\|\\@,\\]\\[]+', ' ', word)\n for stopword in stopwords:\n pattern = r'\\b' + stopword + r'\\b'\n pattern = 
re.compile(pattern)\n word = re.sub(pattern, '', word)\n word = re.sub(r'\\s\\s+', ' ', word)\n return word", "def strip_numbers(s):\n if s:\n s = u' '.join([x for x in s.split(' ') if not x.isdigit()])\n return s", "def replace_any_non_letter_or_number_character(text):\n text = text.strip()\n text = re.sub('[^A-Za-z0-9 ]+', '', text)\n return text", "def remove_nonalpha(text):\n text = ''.join(c for c in text if c.isalpha() or c == ' ')\n return re.sub(\" +\", \" \", text)" ]
[ "0.8487759", "0.78358305", "0.775658", "0.73313", "0.72857094", "0.72296715", "0.7228846", "0.7227922", "0.72268903", "0.71626186", "0.7154582", "0.71416676", "0.71087503", "0.71003777", "0.70956796", "0.70505774", "0.70334387", "0.70186734", "0.7014774", "0.6977059", "0.69701934", "0.69582945", "0.69582945", "0.69582945", "0.6951391", "0.6940458", "0.6937635", "0.6926324", "0.691884", "0.691735" ]
0.7885486
1
pushshift.io's Reddit archives are compressed in different formats over time. Find the correct filename given the date.
def find_reddit_filename(wildcards): yearmonth = wildcards.year + '-' + wildcards.month if yearmonth <= '2017-11': ext = '.bz2' elif yearmonth <= '2018-10': ext = '.xz' else: ext = '.zst' return DATA + "/downloaded/reddit/" + yearmonth + ext
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_zip_file_url_for_specific_date(date):\n date_format = date.strftime('%d%m%y')\n return __class__.href_pattern.format(date_format)", "def get_archive_filename():\r\n today = datetime.date.today()\r\n return str(today)", "def gen_filename_from_date(path,date,autoincrement = True):\n \n fname = date.isoformat().replace(':','.')\n \n if autoincrement:\n\n onlyfiles = [f for f in listdir(path) if isfile(join(path, f)) and f!='.DS_Store']\n \n found_numbers = [int(f.strip('.html').split('_')[1]) for f in onlyfiles if fname == f[0:len(fname)] ]\n \n highest = -1 \n if len(found_numbers)>0:\n highest = max(found_numbers)\n \n return \"{}/{}_{}.html\".format(path,fname,highest+1)", "def date_to_filename(base_path, raw_date_string):\n raw_date_string = raw_date_string[:-1]\n month, day, year = raw_date_string.split(\"/\")\n relative_path = \"{}/{}/{}.md\".format(year, month, day)\n return base_path / relative_path", "def get_filename(out_dir, file_date, extension):\n return path.join(out_dir, f'CrossrefCitations_{file_date}.{extension}')", "def getFileName(self, date, ext='nc4'):\n\n if self.collection is None:\n raise Exception('Invalid collection, check data exists!')\n if ext[0] == '.':\n ext = ext[1:]\n \n datefmt = '%Y%m'\n if self._F in (1, 3, 6, 'D'): # For all frequencies less than or equal to one (1) day\n datefmt += '%d'\n\n dstr = date.strftime( datefmt )\n tmp = [self.collection, dstr, ext]\n if self._type == 'M2':\n tmp = [ getMerraStream(date) ] + tmp \n return '.'.join( tmp )", "def _image_filename(image_name):\n return '{}.tar'.format(image_name.replace(':', '_').replace('/', '_'))", "def extract_date(filename, corpus):\n try:\n if corpus in (\"ester1\", \"ester2\"):\n date_match = re.match(r'^(\\d\\d\\d\\d)', filename)\n else:\n if \"EST2BC-FRE-FR-FINTER-DEBATE\" in filename:\n date_match = re.match(r'.*\\_(\\d\\d\\d\\d)\\d\\d\\d\\d\\_', filename)\n elif \"EST2BC_FRE_FR\" in filename:\n date_match = re.match(r'.*\\_(\\d\\d\\d\\d)\\d\\d\\d\\d\\_', filename)\n else:\n date_match = re.match(r'.*\\_(\\d\\d\\d\\d)\\-', filename)\n date = str(date_match.group(1))\n return date\n\n except Exception as e:\n print(\"Exception du try extract_date\")\n print(e)\n date = \"NA\"\n return date", "def get_file_date(self, file: str) -> date:", "def download(date, shared_args):\n # year with century, zero padded month, then full date\n # TODO fix merra url to include new format strings\n url = settings.MERRA_URL % (date.strftime('%Y'), date.strftime('%m'),\n date.strftime('%Y%m%d'))\n\n filename = url_download(url, settings.MERRA_DIR, shared_args, auth=settings.MERRA_LOGIN)\n return filename", "def getLatestDate(market, folder):\n \n if market == 'stockOption':\n m = 'DTOP_O_'\n elif market == 'indexFuture':\n m = 'DTOP_F_'\n \n dateList = []\n # get file list from directory\n for f in os.listdir(folder):\n if m in f:\n # crop the date from filename\n row = f.replace(m,'').replace('.zip','')\n dateList.append(date(int(row[:4]), int(row[4:6]), int(row[6:])))\n\n \n latest = dateList[0]\n for x in range(1,len(dateList)):\n if dateList[x] > latest:\n latest = dateList[x]\n \n return latest", "def date_from_filename(filename):\n\n if filename.startswith(\"Hearthstone Screenshot\"):\n # eg. Hearthstone Screenshot 01-15-17 17.27.24.png\n date_list = filename[23:31].split('-')\n date_list[2] = '20' + date_list[2] # 15->2015\n else: # underscored version pre mid 2015\n # eg. 
Hearthstone_Screenshot_1.3.2014.20.16.36.png\n date_list = filename[23:-13].split('.')\n if len(date_list[0]) == 1:\n date_list[0] = '0' + date_list[0]\n if len(date_list[1]) == 1:\n date_list[1] = '0' + date_list[1]\n\n time_list = filename[-12:-4].split('.')\n date_list[0], date_list[1] = date_list[1], date_list[0] # american->english date\n date_list.reverse()\n datetime = '/'.join([*date_list, *time_list])\n return datetime", "def base_filename_for_feed_item(feed_item):\n return \"{}_{}\".format(\n int(to_epoch(feed_item.upload_time)),\n feed_item.video_id\n )", "def get_filename_by_date(root_path: str, prefix_path: str, dates_set: set) -> str:\n for date in dates_set:\n full_path = f\"{prefix_path}_{date}.txt\"\n full_path = os.path.join(root_path, full_path)\n if os.path.exists(full_path):\n return full_path\n return \"\"", "def _retrosheet_filename(game_id, data_root):\n # game id is TTTYYYYMMDDN.\n team = game_id[:3]\n year = game_id[3:7]\n file_pattern = year + team + \".EV*\"\n file_path = os.path.join(data_root, \"retrosheet\", year, file_pattern)\n file_matches = glob.glob(file_path)\n return file_matches[0] if len(file_matches) else None", "def get_filename(link):\r\n return link[link.rfind(\"/\") + 1:]", "def __parseDailyFilename(self, f):\n base = os.path.basename(f)\n\n tokens = base.split('.')\n if len(tokens) < 6:\n # assume it's an old file in the format A2000089etcetc.tif i.e. ?YYYYDDD*\n yr = base[1:5]\n day = base[5:8]\n else:\n # assume it's a file in the newer format ?*.YYYY.DDD.etc format\n varname, yr, day, temporalSummary, res, spatialSummary = tokens[0:6]\n outTemplate = varname + \"{}.{}.{}.\" + \"{}.{}.{}.tif\".format(temporalSummary, res, spatialSummary)\n if self._outTemplate == \"FILLED-OUTPUT{}.{}.{}.TemporalSummary.Res.SpatialSummary.tif\":\n self._outTemplate = outTemplate\n else:\n assert self._outTemplate == outTemplate\n return day, yr", "def get_filename(self):\n return self.get_package_name() + '.' 
+ ARCH + \".rpm\"", "def download_archive(self):\n\n def time_convert(structure):\n \"\"\"\n :param structure: tuple representation of time\n :return: GitHub archive time\n \"\"\"\n \n \n join_number_to_zero = lambda number: (\"\" if number > 9 else \"0\") + str(number)\n\n return \"%s-%s-%s-%s\" % (\n structure.tm_year, join_number_to_zero(structure.tm_mon), join_number_to_zero(structure.tm_mday),\n structure.tm_hour)\n\n current_time = self.get_time()\n self.logger.debug(__name__ + \": \" + \"current time: \" + str(gmtime(current_time)))\n\n difference = -25200\n #timezone difference in seconds between GMT and west coast of USA\n\n downloading_time = int(timegm(self.config[\"last_connection_time\"])) + 3600\n self.logger.debug(__name__ + \": \" + \"downloading time: \" + str(gmtime(downloading_time)))\n\n if downloading_time > current_time - 7200:\n self.logger.info(__name__ + \": \" + \"unable to download file (time limiting).\")\n return\n\n downloading_time += difference\n\n json_file_name = self.download_file(time_convert(gmtime(downloading_time)))\n\n self.config[\"last_connection_time\"] = gmtime(downloading_time - difference)\n self.logger.debug(__name__ + \": \" + \"last_connection_time: \" + str(self.config[\"last_connection_time\"]))\n\n return json_file_name", "def url_file_name(url):\r\n return url[url.rfind('/') + 1:]", "def test_file_name_to_date_unzipped(self):\n\n self.assertTrue(\n satellite_io.file_name_to_date(FILE_NAME_ZIPPED) ==\n VALID_DATE_STRING\n )", "def generate_file_name(entry):\n return str_for_file(u'{name}, {year}, {title}'.format(\n year=entry['year'],\n name=get_last_name(entry['author'][0]),\n title=entry['title']\n ))", "def date_string_slipp(file_directory):\n \n pattern = \"[0-9]*\"\n \n date = re.findall(pattern,file_directory)\n\n new_date = \"\"\n first = 0;\n \n for index_date, d in enumerate(date):\n if d and first==0:\n new_date=str(d)\n first = 1;\n elif d:\n new_date+=str(d)\n \n return new_date", "def test_file_name_to_date_zipped(self):\n\n self.assertTrue(\n satellite_io.file_name_to_date(FILE_NAME_ZIPPED) ==\n VALID_DATE_STRING\n )", "def give_filename( url_rel ):\n filename = basename( url_rel )\n\t# Add time information\n now_datetime = datetime.datetime.now( )\n now_string = now_datetime.strftime( \"%Y-%m-%d-%H-%M-%S\" )\n if filename.endswith( '.pdf' ):\n\t\tfileno, ext_pdf = splitext( filename )\n\t\tpdf_filename = fileno + '-' + now_string + ext_pdf\n\t\treturn pdf_filename", "def getArchiveName(zip_file):\n\n with ZipFile(zip_file, 'r') as f:\n names = f.namelist()\n\n return str(names[0])", "def get_date_from_filename(file_path):\n file_name = basename(file_path)\n name, _ = splitext(file_name)\n _, date = name.split('_')\n\n return date", "def getArchiveURI(sha1,archivePath,fileType='jpg'):\n (sha1Path,filename)=getSha1Path(sha1)\n return(archivePath+'/'+sha1Path+'/'+filename+'.'+fileType)", "def normalizeFilenameToCommonDateFormat(filename):\n rgx_date = re.search(r'(\\d+)-(\\d+)-(\\d+)', filename)\n\n if (rgx_date == None):\n raise ValueError(\"Not interested in this file!\")\n \n year = rgx_date.group(1)\n month = rgx_date.group(2)\n day = rgx_date.group(3)\n\n return \"%s%s%s.pdf\" % (year, month, day)", "def paths_sort(path):\n base_name = os.path.basename(path)\n \n stat_name = base_name.split('.')[0] \n\n date = base_name.split('.')[1]\n \n try:\n date = datetime.datetime.strptime(date, '%Y-%m-%d')\n \n return date, stat_name\n except Exception as e:\n print(e)" ]
[ "0.6548199", "0.6423656", "0.6148775", "0.5926651", "0.59204835", "0.5764852", "0.5671795", "0.5668141", "0.56277394", "0.56209695", "0.5613353", "0.55873984", "0.5582913", "0.55802494", "0.5544131", "0.5535669", "0.551915", "0.5513528", "0.5506223", "0.5499908", "0.54785347", "0.5477283", "0.5467822", "0.5461927", "0.54464185", "0.54340434", "0.5418594", "0.5409335", "0.54079896", "0.5407591" ]
0.7414133
0
Get all the sources of word counts we have in a language.
def language_count_sources(lang): return [ DATA + "/counts/{source}/{lang}.txt".format(source=source, lang=lang) for source in LANGUAGE_SOURCES[lang] ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multisource_counts_to_merge(multisource, lang):\n result = [\n _count_filename(source, lang)\n for source in MERGED_SOURCES[multisource]\n if lang in SOURCE_LANGUAGES[source]\n ]\n return result", "def language_text_sources(lang):\n return [\n DATA + \"/tokenized/{source}/{lang}.txt\".format(source=source, lang=lang)\n for source in LANGUAGE_SOURCES[lang]\n if source in FULL_TEXT_SOURCES\n ]", "def get_source_counts(self):\n return deepcopy(self._source_counts)", "def fetch_languages(self):\r\n \r\n # tokenize, clean and filter document tokens \r\n toks = [re.sub(r'[^a-zA-Z]','', tok.text.lower().strip()) for tok in self.doc]\r\n toks = [tok for tok in toks if len(tok)>1 and tok in LANGUAGES]\r\n toks = sorted(set(toks))\r\n \r\n return toks", "def main_func(sources):\n art_count = 0\n word_count = 0\n for source in sources:\n titles = get_articles(source)\n art_count += len(titles)\n word_count += count_word('trump', titles)\n\n return (word_count, art_count)", "def detect_language(text, LANGUAGES):\n lang = None\n word_count = 0\n our_test = []\n \n for language in LANGUAGES:\n \n result = get_word_count(text, language['common_words'])\n print(result)\n #import pdb; pdb.set_trace()\n if result > word_count:\n lang = language['name']\n word_count = result\n \n return lang", "def all_word_counts(self):\n return self._get(\"all_word_counts\")", "def get_language_data_statistic(language_data_dir, mode):\n\n label_paths = list(map(\n lambda x: os.path.join(language_data_dir, x),\n LABEL_FILENAMES))\n\n # Analyze the language data files\n language_data_stats = []\n for label_path in label_paths:\n # If gold doesn't exist, don't read it\n if not os.path.exists(label_path):\n if \".gold\" in os.path.basename(label_path):\n language_data_stats.append(None)\n continue\n\n with open(label_path) as label_file:\n num_instances = 0\n num_statistic = 0\n for label_line in label_file:\n num_instances += 1\n label_src, label_tgt = label_line.rstrip(\"\\n\").split(\"\\t\")[:2]\n # Count multiword targets\n if mode == \"multiword\":\n if len(label_tgt.split(\" \")) > 1:\n num_statistic += 1\n # Count discontiguous targets\n if mode == \"discontiguous\":\n if \"...\" in label_tgt:\n num_statistic += 1\n language_data_stats.append(truncate(num_statistic / num_instances, 3))\n return language_data_stats", "def word_frequencies(url):\n\ttexts = get_all_texts(url)\n\tcount = count_words_in_sentence_list(texts)\n\treturn count", "def collect_english_cats(self):\n tf.logging.info('collecting english categories')\n self.english_cats = list(\n self.frames(filter_english=True, filter_category=True))", "def get_word_counts(slides) -> List[int]:\n word_count = []\n for slide in slides:\n # print(f\"========== slide {len(text_count)+1} ========== [{slide.slide_layout.name}]\")\n words = 0\n # find all text\n for shape in slide.shapes:\n if not shape.has_text_frame:\n continue\n # print(shape.name)\n for paragraph in shape.text_frame.paragraphs:\n for run in paragraph.runs:\n # print(\" \" + run.text)\n words += len(run.text.split())\n word_count.append(words)\n return word_count", "def languages_under_threshold(threshold):\n pattern = os.path.join(RAW_UD_DIR, \"**/stats.xml\")\n filenames = glob.glob(pattern, recursive=True)\n counts = {}\n for filename in filenames:\n dirname = os.path.split(os.path.split(filename)[0])[1]\n language = re.match(r\"UD_(\\w+)-\\w+\", dirname).groups(1)[0]\n tree = ElementTree.parse(filename)\n root = tree.getroot()\n num_tokens = int(root.find(\"./size/total/tokens\").text)\n if 
language not in counts:\n counts[language] = num_tokens\n else:\n counts[language] += num_tokens\n return sorted([lg for lg, num in counts.items() if num <= threshold])", "def computeWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n # print token_map.items()\n return sorted(token_map.items(), key = lambda x : x[1], reverse = True)", "def get_counts(self):\n counts = {}\n for document in self.docs:\n for word in document:\n if word not in counts.keys():\n counts[word] = 1\n else:\n counts[word] += 1\n return counts", "def mapWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n return token_map", "def _extract_vocab_data(source_files):\n vocab = set()\n\n for source_file in source_files:\n with tf.gfile.Open(source_file) as vocab_file:\n for line in vocab_file:\n tokens = line.split()\n vocab.update(tokens)\n\n return list(vocab)", "def source_keys(self):\n for source_key in self._counts.keys():\n yield source_key", "def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources:\")\n print(sources)\n return sources", "def listsources():\n\tmain_url = \" https://newsapi.org/v2/sources?apiKey=5f81b593f35d42a8980313250c03d7e7\"\n\n\t# fetching data in json format \n\topen_source = requests.get(main_url).json() \n\n\t# getting all articles in a string sources\n\tsource = open_source[\"sources\"] \n\n\t# empty list which will \n\t# contain all trending newssources \n\tresults = [] \n\t\n\tfor k in source: \n results.append(k[\"id\"])\n \n \t\n\tfor w in results[0:4]:\n print(w)", "def get_words_list(folder_name):\n files_list = [x for x in folder_name.glob('**/*.txt')]\n\n words_list = []\n for i in files_list:\n words_list.extend(open(i).readlines())\n \n return dict(Counter(splitter(words_list)))", "def corpora_stats(output):\n igFiles = []\n for root, directories, filenames in os.walk(output + \"/ig/\"):\n for filename in filenames:\n igFiles.append(os.path.join(root, filename))\n igFiles = filter(lambda x: \".txt\" in x, igFiles)\n words = []\n for file in igFiles:\n fileH = open(file, \"r\")\n words = words + fileH.read().split(\" \")\n print(\"Number of words in IG corpus: {}\".format(len(words)))\n print(\"Vocabulary size of IG corpus: {}\".format(len(set(words))))", "def load_common_words(language: str, tot_num: int) -> ty.Set[str]:\n logg = logging.getLogger(f\"c.{__name__}.load_common_words\")\n logg.setLevel(\"DEBUG\")\n logg.debug(\"Start load_common_words\")\n\n lang = pycountry.languages.get(name=language)\n lang_alpha2_tag = lang.alpha_2\n\n common_words_folder = get_package_folders(\"common_words\")\n common_words_path = common_words_folder / f\"{lang_alpha2_tag}.txt\"\n\n common_words = set()\n with common_words_path.open() as common_words_file:\n for line in common_words_file:\n common_words.add(line.strip())\n if len(common_words) == tot_num:\n break\n\n logg.debug(f\"common_words: {common_words}\")\n\n return common_words", "def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources\")\n print(sources)\n return sources", "def get_language_distribution(username):\n users_repos, api_calls = 
get_repos(username)\n \n language_count = {}\n \n for repo in users_repos:\n language_count[repo.language] = language_count.get(repo.language, 0) + 1\n \n return language_count, api_calls", "def get_word_frequency():\n counter = Counter()\n with open('resource/word-count.txt', encoding=\"utf8\") as f:\n for line in f.readlines():\n try:\n word, count = line.split(':')\n if (word == \"RT\"):\n continue\n count = int(count)\n counter[word] += count\n except Exception as e:\n continue\n return counter", "def count_words(filename):", "def countWords(emailid, englishwords):\n email = e.Email(emailid)\n words = email.body\n subject = email.parsedsubject\n emailcontent = subject.update(words)\n counter = {word:0 for word in englishwords}\n ordered = OrderedDict(sorted(counter.items(), key=lambda t: t[0]))\n for word in words:\n if word in counter:\n counter[word] = counter[word] + 1\n\n return list(counter.values())", "def source_freq(self) -> int:", "def updateWordCounts():\n emaildata = loadEmailData()\n englishwords = importDictionary()\n countAllWords(emaildata, englishwords)", "def find_langs(args):\n infile = args.source\n langs = {}\n for line in infile:\n name_and_files = line.split()\n name = name_and_files[0]\n if name not in langs:\n langs[name] = []\n langs[name] += read_files.filter_files(name_and_files[1:])\n langs[args.unknown] += read_files.filter_files(args.classify)\n return langs" ]
[ "0.6437999", "0.6354548", "0.59641296", "0.5802575", "0.5790304", "0.5753598", "0.5729502", "0.5714558", "0.56884634", "0.5630879", "0.5625249", "0.5615212", "0.5581965", "0.5549736", "0.5501632", "0.54856443", "0.5466191", "0.54459345", "0.5438521", "0.54131645", "0.5384228", "0.5372655", "0.53650236", "0.5355648", "0.5351122", "0.5350806", "0.53338355", "0.5318376", "0.5300664", "0.5298273" ]
0.7888476
0
Get all the sources of tokenized text we have in a language.
def language_text_sources(lang): return [ DATA + "/tokenized/{source}/{lang}.txt".format(source=source, lang=lang) for source in LANGUAGE_SOURCES[lang] if source in FULL_TEXT_SOURCES ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_languages(self):\r\n \r\n # tokenize, clean and filter document tokens \r\n toks = [re.sub(r'[^a-zA-Z]','', tok.text.lower().strip()) for tok in self.doc]\r\n toks = [tok for tok in toks if len(tok)>1 and tok in LANGUAGES]\r\n toks = sorted(set(toks))\r\n \r\n return toks", "def get_source_tags(self):\n return ['en:' + self.tag_manager.normalize_tag_wtokenization(t, self.tries['en'], prefixed=False) for t in self.tag_manager.unprefixed_source_tags]", "def _get_source_chunks(self, input_text, language=None):\n chunks = ChunkList()\n sentence_length = 0\n tokens = api.get_annotations(self.service, input_text, language)\n for i, token in enumerate(tokens):\n word = token['text']['content']\n begin_offset = token['text']['beginOffset']\n label = token['dependencyEdge']['label']\n pos = token['partOfSpeech']['tag']\n if begin_offset > sentence_length:\n chunks.append(Chunk.space())\n sentence_length = begin_offset\n chunk = Chunk(word, pos, label)\n # Determining default concatenating direction based on syntax dependency.\n chunk.maybe_add_dependency(\n i < token['dependencyEdge']['headTokenIndex'])\n chunks.append(chunk)\n sentence_length += len(word)\n return chunks", "def _get_source_chunks(self, input_text, language=None):\n chunks = ChunkList()\n seek = 0\n result = self._get_annotations(input_text, language=language)\n tokens = result['tokens']\n language = result['language']\n for i, token in enumerate(tokens):\n word = token['text']['content']\n begin_offset = token['text']['beginOffset']\n label = token['dependencyEdge']['label']\n pos = token['partOfSpeech']['tag']\n if begin_offset > seek:\n chunks.append(Chunk.space())\n seek = begin_offset\n chunk = Chunk(word, pos, label)\n if chunk.label in _DEPENDENT_LABEL:\n # Determining concatenating direction based on syntax dependency.\n chunk.dependency = i < token['dependencyEdge']['headTokenIndex']\n if chunk.is_punct():\n chunk.dependency = chunk.is_open_punct()\n chunks.append(chunk)\n seek += len(word)\n return chunks, language", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def iter_trans_tokens(self, source: str) -> Iterator[TransTokenSource]:\n yield from self.editor.iter_trans_tokens(source)\n if self.all_name is not None:\n yield self.all_name, source + '.all_name'\n yield from tkMarkdown.iter_tokens(self.desc, source + '.desc')\n for item in self.editor_extra:\n yield from item.iter_trans_tokens(f'{source}:{item.id}')", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def tokenize(text):\n source = list(text.rstrip().replace('\\n', ' '))\n return source", "def tokenize_en(text):\n spacy_en = spacy.load('en')\n return [tok.text for tok in spacy_en.tokenizer(text)]", "def analyze_syntax(self, languageModel):\n analysis = languageModel.annotate_text(include_entities=False, include_sentiment=False)\n return analysis.tokens", "def get_tokenized_texts(self, to_lower=True):\n # init pages for each url\n if not self.texts:\n self.pages = []\n # for rate, url, description\n for u in self.urls:\n 
self.pages.append(Page(u))\n # form texts\n self.texts = []\n for p in self.pages:\n # catch all excetptions of pages that we cannot parse\n try:\n tokens = p.get_list_of_tokens(to_lower=to_lower)\n except ParsingError:\n self.log.warn(u\"Failed to get tokens from text of url \"\n \"{url}. Skipping it.\".format(url=p.url))\n # preserve indexing for backward restoring possibility\n self.texts.append([])\n continue\n self.texts.append(tokens)\n\n # self.texts = [p.get_list_of_tokens() for p in self.pages]\n return self.texts", "def process_text(text):\n return [token.text for token in nlp(text) if not token.is_stop]", "def _extract_vocab_data(source_files):\n vocab = set()\n\n for source_file in source_files:\n with tf.gfile.Open(source_file) as vocab_file:\n for line in vocab_file:\n tokens = line.split()\n vocab.update(tokens)\n\n return list(vocab)", "def preprocess(text):\n\tX = []\n\tsent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n\tfor t in text:\n\t\tsents = sent_detector.tokenize(t)\n\t\tresult = ''\n\t\tfor s in sents:\n\t\t\ttokens = word_tokenize(s)\n\t\t\tresult += ' ' + ' '.join(tokens)\n\t\tX.append(result)\n\treturn X", "def sents(self):\n\n text = str()\n for file in os.listdir(self.path):\n # checks if the given path contains a text file and opens it\n if file.endswith(\".txt\"):\n with open(self.path + \"/\" + file) as connection:\n text += connection.read()\n\n # tokenizes the text to sentences and tokenizes the tokenized sentences to words\n sentences_list = nltk.sent_tokenize(text)\n word_list = [nltk.word_tokenize(sent) for sent in sentences_list]\n\n return word_list", "def tokenize(src):\n\n pass", "def preprocess(self,text):\n return preprocess.get_tokens(text)", "def source(self) -> list:\n sources = self.source_control.list_sources()\n sources_list = [source['label'] for source in sources]\n return sources_list", "def get_all_techniques(src, source_name, tactic=None):\n filters = [\n Filter(\"type\", \"=\", \"attack-pattern\"),\n Filter(\"external_references.source_name\", \"=\", source_name),\n ]\n if tactic:\n filters.append(Filter('kill_chain_phases.phase_name', '=', tactic))\n\n results = src.query(filters)\n return remove_deprecated(results)", "def process_text(self, text, lemma=False):\n processed_text = TextGraph.nlp(text.lower())\n words = [t.text.strip() if not lemma else t.lemma_ for t in processed_text if not t.is_punct]\n return words", "def getTokens(self):\n list = []\n for i in range(self.startIdx, self.endIdx + 1):\n token = self.sentence[i]\n list.append(token)\n return list", "def tokenize_text(document, nlp):\n\n return [token.text for token in nlp(document)]", "def get_tokens(self, text):\n\t\treturn tuple(self._compiled_pattern.findall(text))", "def split_text_into_sentences(text: str, language: str='es') -> List[str]:\n if not language in ACCEPTED_LANGUAGES[language]:\n raise ValueError(f'Language {language} is not supported yet')\n\n nlp = spacy.load(language, disable=['tagger', 'parser', 'ner'])\n nlp.add_pipe(nlp.create_pipe('sentencizer'))\n text_spacy = nlp(text)\n return [str(sentence) for sentence in text_spacy.sents]", "def get_token_list():\n token_list = []\n tokens_dir_path = os.path.join(BASE_DIR, TOKENS_DIR)\n for dir, dirs, files in os.walk(tokens_dir_path):\n for file_name in files:\n file = open(os.path.join(tokens_dir_path, file_name), 'r')\n token_list.append(file.read().strip())\n file.close()\n return token_list" ]
[ "0.6808751", "0.6602341", "0.65438026", "0.6541259", "0.6345355", "0.6345355", "0.62279546", "0.61624664", "0.6071694", "0.6071694", "0.6071694", "0.6071694", "0.60606", "0.6059048", "0.5987303", "0.59685606", "0.593609", "0.5931265", "0.5897387", "0.58759075", "0.5802738", "0.57954973", "0.5761745", "0.57113886", "0.56965846", "0.5694699", "0.5688748", "0.56885034", "0.56837624", "0.5679482" ]
0.79785043
0
Given a multisource name like 'news' and a language code, find which sources of counts should be merged to produce it.
def multisource_counts_to_merge(multisource, lang): result = [ _count_filename(source, lang) for source in MERGED_SOURCES[multisource] if lang in SOURCE_LANGUAGES[source] ] return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def language_count_sources(lang):\n return [\n DATA + \"/counts/{source}/{lang}.txt\".format(source=source, lang=lang)\n for source in LANGUAGE_SOURCES[lang]\n ]", "def sources(source):\n\n source2 = models.Source(name=u\"Bob's Funerals.com\", url=u\"http://www.bobsfunerals.com\")\n source3 = models.Source(name=u\"Jim's Funerals.com\", url=u\"http://www.jimsfunerals.com\")\n return (source, source2, source3)", "def listsources():\n\tmain_url = \" https://newsapi.org/v2/sources?apiKey=5f81b593f35d42a8980313250c03d7e7\"\n\n\t# fetching data in json format \n\topen_source = requests.get(main_url).json() \n\n\t# getting all articles in a string sources\n\tsource = open_source[\"sources\"] \n\n\t# empty list which will \n\t# contain all trending newssources \n\tresults = [] \n\t\n\tfor k in source: \n results.append(k[\"id\"])\n \n \t\n\tfor w in results[0:4]:\n print(w)", "def language_text_sources(lang):\n return [\n DATA + \"/tokenized/{source}/{lang}.txt\".format(source=source, lang=lang)\n for source in LANGUAGE_SOURCES[lang]\n if source in FULL_TEXT_SOURCES\n ]", "def main_func(sources):\n art_count = 0\n word_count = 0\n for source in sources:\n titles = get_articles(source)\n art_count += len(titles)\n word_count += count_word('trump', titles)\n\n return (word_count, art_count)", "def find_langs(args):\n infile = args.source\n langs = {}\n for line in infile:\n name_and_files = line.split()\n name = name_and_files[0]\n if name not in langs:\n langs[name] = []\n langs[name] += read_files.filter_files(name_and_files[1:])\n langs[args.unknown] += read_files.filter_files(args.classify)\n return langs", "def mixed_author_sources(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n \"authorsFemaleCount\": {\"$gt\": 0},\n \"authorsMaleCount\": {\"$gt\": 0},\n }\n },\n {\n \"$project\": {\n \"_id\": 1,\n \"outlet\": 1,\n \"authors\": 1,\n \"authorsMale\": 1,\n \"authorsFemale\": 1,\n \"authorsUnknown\": 1,\n \"sourcesMaleCount\": 1,\n \"sourcesFemaleCount\": 1,\n \"sourcesUnknownCount\": 1,\n }\n },\n {\n \"$group\": {\n \"_id\": \"$outlet\",\n \"totalArticles\": {\"$sum\": 1},\n \"totalMaleSources\": {\"$sum\": \"$sourcesMaleCount\"},\n \"totalFemaleSources\": {\"$sum\": \"$sourcesFemaleCount\"},\n \"totalUnknownSources\": {\"$sum\": \"$sourcesUnknownCount\"},\n }\n },\n ]\n return query", "def get_language_distribution(username):\n users_repos, api_calls = get_repos(username)\n \n language_count = {}\n \n for repo in users_repos:\n language_count[repo.language] = language_count.get(repo.language, 0) + 1\n \n return language_count, api_calls", "def dashboard_article_sources():\n sources = dict()\n past_30 = offset_time_past(30, str=True)\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n results = articles.find({'collected': {'$gt': past_30}}, {'_id': 0})\n for result in results:\n sources[result['source']] = sources.get(result['source'], 0) + 1\n sources = sorted(sources.items(), key=operator.itemgetter(1), reverse=True)\n data = sources[:10]\n return jsonify(data)", "def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources:\")\n print(sources)\n return sources", "def 
fetch_all_news_codes():\n response = requests.get(SOURCE_URL)\n json = response.json()\n global news_codes\n for source in json['sources']:\n news_codes.append(source['id'])", "def combined_scraping(self, lang='none'): \n \n raw_pastes=self.scrape_raw(lang)\n patterns=self.find_all_patterns(raw_pastes)\n matches=self.find_matching_pastes(raw_pastes)\n \n return {\"matches\":matches, \"patterns\":patterns}", "def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources\")\n print(sources)\n return sources", "def correct_counts():\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n monitors = mongo.db[app.config['MONITORS_COLLECTION']]\n unique = articles.distinct('feed_source', dict())\n for link in unique:\n count = articles.count({'feed_source': link})\n monitors.update({'metadata.rss_link': link}, {'$set': {'hits': count}})", "def reconcileAllTitles(self):\n # stats container\n stats = {\n 'new': 0,\n 'existing': 0,\n 'ambiguous': 0,\n 'not found': 0\n }\n\n # build a list/map of all titles and their location\n locations = self.sources.getAllLocations(False)\n for loc in locations:\n self.logger.info(\"Looking at %s.\" % (loc.name))\n\n titlesInLoc = self.sources.getNonTranslatedTitlesInLocation(loc.id)\n\n # for each title:\n for aTitleInLoc in titlesInLoc:\n\n # clean up the 3D, (OV), etc...\n aTitleInLoc['title'] = stringUtils.cleanupTitle(aTitleInLoc['title'])\n\n ####\n # LOCAL CHECKS:\n # verify if this title and language are already in, insert it if needed,\n # updates the stats accordingly\n check = self.cheeckLocallyAndInsertTitle(aTitleInLoc = aTitleInLoc, theLanguage = loc.language)\n if check == 0:\n stats['existing'] += 1\n elif check == 1:\n stats['new'] += 1\n\n ####\n # TMDB REMOTE CHECKS:\n # new title, look it up on imdb\n else:\n check = self.checkTMBDAndInsertTitle(aTitleInLoc = aTitleInLoc, theLanguage = loc.language)\n if check == 0:\n stats['ambiguous'] += 1\n elif check == 1:\n stats['new'] += 1\n elif check == -1:\n stats['not found'] += 1\n\n self.logger.info(\"End run:\\n\\t{new:5} new translations\\n\\t{existing:5} existing\\n\\t{ambiguous:5} ambiguous\\n\\t{not found:5} not found\"\n .format_map(stats))", "def paracrawl_language_pair_source(lang):\n if lang == 'en':\n other = 'fr'\n else:\n other = 'en'\n\n lang1, lang2 = sorted([lang, other])\n langpair = '{}_{}'.format(lang1, lang2)\n\n return DATA + \"/tokenized/paracrawl-paired/{langpair}.{lang}.txt\".format(langpair=langpair, lang=lang)", "def mergeLanguageClaims(self):\r\n self.lang_claims.sort()\r\n for claim in self.lang_claims:\r\n if self.merged_claims and self.merged_claims[-1] == claim:\r\n self.merged_claims[-1].new_sources |= claim.new_sources\r\n else:\r\n self.merged_claims.append(claim)", "def detect_language(text, LANGUAGES):\n lang = None\n word_count = 0\n our_test = []\n \n for language in LANGUAGES:\n \n result = get_word_count(text, language['common_words'])\n print(result)\n #import pdb; pdb.set_trace()\n if result > word_count:\n lang = language['name']\n word_count = result\n \n return lang", "def extract_language_info(self, source: str) -> Dict[str, float]:\n languages = self.languages_compiled_exp.findall(source)\n language_info = {}\n for lang in languages:\n name = ' '.join(lang.split()[:-1])\n percent = float(lang.split()[-1]) # %\n language_info[name] = percent\n return language_info", "def 
show_sources_all():\n response = requests.get(SOURCE_URL)\n json = response.json()\n for source in json['sources']:\n print(u\"{0}: <{1}> {2}\".format(\"News Code\", source['id'], source['name']))", "def filter_by_langs(self, langs):\n new_candidate_set = CandidateSet()\n for c in self.as_list():\n if c.language in langs:\n new_candidate_set.add_or_retrieve_candidate(c)\n # Although we've added all the relevant candidates to the new candidate_set,\n # we need to update the by_xling_id dictionary in the new candidate_set.\n for xling_id, candidates in self.by_xling_id.items():\n for c in candidates:\n if c.uid in new_candidate_set.by_uid:\n new_candidate_set.update_xling_id(c, xling_id)\n return new_candidate_set", "def unknown_author_sources(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n \"authorsFemaleCount\": 0,\n \"authorsMaleCount\": 0,\n }\n },\n {\n \"$project\": {\n \"_id\": 1,\n \"outlet\": 1,\n \"authors\": 1,\n \"authorsMale\": 1,\n \"authorsFemale\": 1,\n \"authorsUnknown\": 1,\n \"sourcesMaleCount\": 1,\n \"sourcesFemaleCount\": 1,\n \"sourcesUnknownCount\": 1,\n }\n },\n {\n \"$group\": {\n \"_id\": \"$outlet\",\n \"totalArticles\": {\"$sum\": 1},\n \"totalMaleSources\": {\"$sum\": \"$sourcesMaleCount\"},\n \"totalFemaleSources\": {\"$sum\": \"$sourcesFemaleCount\"},\n \"totalUnknownSources\": {\"$sum\": \"$sourcesUnknownCount\"},\n }\n },\n ]\n return query", "def aggregate_translations(wildcards):\n checkpoint_output = checkpoints.align.get(**wildcards).output.translations\n return expand(build_dir + \"/{build_name}/{segment}/nextalign/masked.gene.{gene}.fasta\",\n build_name=wildcards.build_name,\n segment=wildcards.segment,\n gene=GENES[wildcards.segment])", "def get_results_from_aggregation_sources(self, context):", "def top_sources_all(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n }\n },\n {\n \"$project\": {\n \"outlet\": 1,\n \"sourcesMale\": 1,\n \"sourcesFemale\": 1,\n \"allSources\": {\n \"$concatArrays\": [\n {\"$ifNull\": [\"$sourcesFemale\", []]},\n {\"$ifNull\": [\"$sourcesMale\", []]},\n ]\n },\n }\n },\n {\"$unwind\": {\"path\": \"$allSources\", \"preserveNullAndEmptyArrays\": False}},\n {\"$group\": {\"_id\": \"$allSources\", \"count\": {\"$sum\": 1.0}}},\n {\"$sort\": {\"count\": args[\"sort\"]}},\n {\"$limit\": args[\"limit\"]},\n ]\n return query", "def find_what_to_combine():\n import os.path\n index = os.path.join('input', 'en', 'index.txt')\n f = open(index)\n for line in f:\n line = line.strip()\n if line.startswith('section-pages:'): break\n if not line.startswith('section-pages:'):\n raise Exception(\"Could not find a section-pages line in \" + index.txt)\n line = line[14:]\n items = [index]\n for item in line.split(','):\n item = item.strip()\n if item: items.append(os.path.join('input', 'en', item+'.txt'))\n return items", "def get_source_from_tumblr(self, source_word_strs, content_dir):\n # Consider each source word string\n host_names = []\n blog_posts = []\n for source_word_str in source_word_strs:\n\n # Create and dump, or load, the TumblrSource 
pickle.\n ts = TumblrSource(self, source_word_str, content_dir)\n ts.set_source(do_purge=self.source_do_purge)\n\n # Accumulate blog info, and posts\n for b_p in ts.blog_posts:\n if not 'blog' in b_p:\n continue\n h_n = b_p['blog']['name']\n if not h_n in host_names:\n host_names.append(h_n)\n blog_posts.append(b_p)\n\n # Consider sample posts from each blog\n total_tags = []\n for blog in blog_posts:\n\n # If there are no posts for the current blog, note that the\n # total number of tag appearances is zero, and continue to the\n # next blog\n n_tags = 0\n if not 'posts' in blog:\n total_tags.append(n_tags)\n continue\n\n # Consider each post from the current blog\n posts = blog['posts']\n for post in posts:\n\n # Consider each source word\n for source_word_str in source_word_strs:\n\n # Process the source word string to create log and\n # path strings, and assign input argument attributes\n (source_log,\n source_path,\n source_header,\n source_label,\n source_type,\n source_word) = ts.author_utility.process_source_words(source_word_str)\n\n # Count the appearances of the current source word in\n # the current post of the current blog\n n_tags += len(re.findall(source_word, \"\".join(post['tags']), re.I))\n\n # Note the total number of tag appearances for the current\n # blog\n total_tags.append(n_tags)\n\n # Find the blogs with the highest number of tag appearances\n np_total_tags = np.array(total_tags)\n min_total_tags = self.tumblr_min_total_tags\n index_blog, = np.nonzero(np_total_tags > min_total_tags)\n while np.size(index_blog) < self.tumblr_min_total_blogs and min_total_tags > 0:\n min_total_tags -= 1\n index_blog, = np.nonzero(np_total_tags >= min_total_tags)\n\n # Select the blogs with the highest number of tag appearances\n blogs_info = []\n posts = []\n likes = []\n notes = []\n for i_blg in index_blog:\n\n info = blog_posts[i_blg]['blog']\n blogs_info.append(info)\n if 'posts' in info:\n posts.append(info['posts'])\n else:\n posts.append(0)\n if 'likes' in info:\n likes.append(info['likes'])\n else:\n likes.append(0)\n\n note_count = 0\n for post in blog_posts[i_blg]['posts']:\n if 'note_count' in post:\n note_count += post['note_count']\n notes.append(note_count)\n\n # Assign number of posts, number of notes, and compute the\n # notes to posts ratio\n np_n_posts = np.array(posts)\n np_n_notes = np.array(notes)\n np_n_trusting = np.divide(np_n_notes, np_n_posts)\n\n # Convert the numeric scores to string scores\n np_s_posts = ts.n_to_s(np_n_posts)\n np_s_notes = ts.n_to_s(np_n_notes)\n np_s_trusting = ts.n_to_s(np_n_trusting)\n\n # Create a dictionary of blogs in order to print a JSON document\n # to a file\n blogs = []\n for i_blg in range(len(blogs_info)):\n blog = {}\n\n info = blogs_info[i_blg]\n\n if 'name' in info:\n blog['name'] = info['name']\n else:\n blog['name'] = \"\"\n if 'title' in info:\n blog['title'] = info['title']\n else:\n blog['title'] = \"\"\n if 'description' in info:\n blog['description'] = info['description']\n else:\n blog['description'] = \"\"\n if 'url' in info:\n blog['url'] = info['url']\n else:\n blog['url'] = \"\"\n\n blog['posts'] = np_n_posts[i_blg]\n blog['notes'] = np_n_notes[i_blg]\n blog['trusting'] = np_n_trusting[i_blg]\n blog['score'] = np_s_posts[i_blg] + np_s_notes[i_blg] + np_s_trusting[i_blg]\n\n if blog['score'] == \"+++\":\n blog['include'] = True\n else:\n blog['include'] = False\n\n blogs.append(blog)\n\n return blogs", "def get_data_source_identifiers(self):\n _identifiers = []\n for lang, feed in 
config.RSS_NEWS_FEEDS.items():\n logger.debug(u\"consultando press release (lang: %s), feed: %s\" % (lang, feed))\n feed_url_by_lang = feed['url'].format(lang) # ex: http://blog.scielo.org/en/feed/\n feed_entries_list = self.get_feed_entries(feed_url_by_lang)\n for raw_feed_entry in feed_entries_list:\n _identifiers.append(raw_feed_entry)\n return _identifiers", "def search_source(self,strz):\n\t\tfor src in sources_rip: #sources_rip = list of allow source words\n\t\t\tif src in strz:\n\t\t\t\tself.src_rip=src.replace(\".\",\"\")\n\t\t\t\treturn strz.replace(src,\"\")\n\t\treturn strz", "def process_article(title):\n strings = []\n for lang in languages:\n strings.append(get_page(title,lang))\n return article_stats(strings,title)" ]
[ "0.6476298", "0.5664169", "0.54164404", "0.5403179", "0.5313563", "0.51924914", "0.5011071", "0.49869028", "0.49731225", "0.49561828", "0.4951889", "0.49437946", "0.49339575", "0.49018767", "0.4884029", "0.48553824", "0.4843885", "0.48337176", "0.4828485", "0.48259106", "0.48203617", "0.4806273", "0.4793924", "0.4791327", "0.47907406", "0.47717434", "0.47419673", "0.4728108", "0.47197354", "0.47182512" ]
0.76582414
0
Given a language code in ParaCrawl, we find the "paired" file that contains monolingual tokenized data from that language. ParaCrawl is parallel data, so its input files refer to language pairs. In practice, each language pair is English and a non-English language. So the result for most languages is that they are paired with English. English is paired with French, as that language pair yields the most text. A "paired" filename is tagged with both a language pair and a single language. All the text in the file is in that single language, but the filename also refers to the language pair that it came from. The other file from that language pair has corresponding lines in the same order, so you could 'paste' them together to get tabular parallel text, with text in one language and its translation in another. We sort the language codes to make them consistent with OPUS sources.
def paracrawl_language_pair_source(lang): if lang == 'en': other = 'fr' else: other = 'en' lang1, lang2 = sorted([lang, other]) langpair = '{}_{}'.format(lang1, lang2) return DATA + "/tokenized/paracrawl-paired/{langpair}.{lang}.txt".format(langpair=langpair, lang=lang)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def translationText(language, listOfWords):\n txt = open(language+\".txt\", mode=\"r\").readlines()\n translatedWords = []\n for word in listOfWords:\n for line in txt:\n if line.split()[0] == word:\n translatedWords.append(line.split()[1])\n return translatedWords", "def process(self):\n\n linelang = defaultdict(int)\n wordlang = defaultdict(int)\n\n linefont = defaultdict(int)\n wordfont = defaultdict(int)\n\n inputfiles = self.input_files\n for input_file in inputfiles:\n\n alignurl = input_file.url\n pcgts = parse(alignurl, True)\n page = pcgts.get_Page()\n regions = page.get_TextRegion()\n\n for region in regions:\n lines = region.get_TextLine()\n\n for line in lines:\n try:\n llang = line.primaryLanguage\n linelang[llang] += 1\n except TypeError:\n pass\n\n try:\n lfont = line.fontFamily\n linefont[lfont] += 1\n except TypeError:\n pass\n\n words = line.get_Word()\n for word in words:\n try:\n wlang = word.language\n wordlang[wlang] += 1\n except TypeError:\n pass\n\n try:\n wfont = word.get_TextStyle().fontFamily\n wordfont[wfont] += 1\n except TypeError:\n pass\n\n #predominant language\n try:\n lang = max(linelang, key=lambda k: linelang[k])\n except TypeError:\n try:\n lang = max(wordlang, key=lambda k: wordlang[k])\n except TypeError:\n lang = 'German'\n\n #predominant font\n try:\n font = max(linefont, key=lambda k: linefont[k])\n except TypeError:\n try:\n font = max(wordfont, key=lambda k: wordfont[k])\n except TypeError:\n font = 'Antiqua'\n\n\n print(lang)\n print(font)", "def convert(args):\n if args.unique:\n output_filepath = '{}.as.unique.wordpairs.txt'.format(\n args.data.split('.txt')[0])\n else:\n output_filepath = '{}.as.wordpairs.txt'.format(\n args.data.split('.txt')[0])\n pairs = []\n logger.info('Saving output to {}'.format(output_filepath))\n with open(args.data, 'r', encoding='utf-8') as input_stream:\n for line in input_stream:\n line = line.strip()\n seq = line.split('\\t')\n if len(seq[0].split()) == 1:\n pairs.append((seq[0], seq[1]))\n else:\n xtokens = seq[0].split()\n ytokens = seq[1].split()\n if len(xtokens) != len(ytokens):\n raise Exception(\n 'Invalid input sequences: should contain the same '\n 'number of tokens: \\n {} \\n {}'.format(seq[0], seq[1]))\n for xtoken, ytoken in zip(xtokens, ytokens):\n pairs.append((xtoken, ytoken))\n if args.unique:\n pairs = set(pairs)\n with open(output_filepath, 'w', encoding='utf-8') as output_str:\n for pair in sorted(pairs):\n print('{}\\t{}'.format(pair[0], pair[1]), file=output_str)\n # with open(args.data, 'r', encoding='utf-8') as input_stream:\n # with open(output_filepath, 'w', encoding='utf-8') as output_str:\n # for line in input_stream:\n # line = line.strip()\n # if line:\n # seq = line.split('\\t')\n # xtokens = seq[0].split()\n # ytokens = seq[1].split()\n # if len(xtokens) != len(ytokens):\n # raise Exception(\n # 'Invalid input sequences: should contain the same '\n # 'number of tokens: \\n {} \\n {}'.format(seq[0], seq[1]))\n # print('{}\\t{}'.format(' '.join(xtokens), ' '.join(ytokens)),\n # file=output_str)", "def shared_words_from_filenames(filename1, filename2):\r\n\r\n \"\"\"\r\n filename1 = tokenize(text1)\r\n filename2 = tokenize(text2)\r\n\r\n list3 = set(filename1) & set(filename2)\r\n\r\n return list3\r\n\r\n \"\"\"\r\n with open(filename1, encoding=\"utf8\") as f1, open(filename2, encoding=\"utf8\") as f2:\r\n\r\n wordsFile1 = [];\r\n wordsFile2 = [];\r\n result = [];\r\n\r\n lines = [line.strip() for line in f1] # create a set of words from file 1\r\n for line in lines:\r\n 
tokenizedline = tokenize(line.replace('\\ufeff', ''));\r\n for word in tokenizedline:\r\n wordsFile1.append(word);\r\n\r\n lines = [line.strip() for line in f2] # create a set of words from file 1\r\n for line in lines:\r\n tokenizedline = tokenize(line.replace('\\ufeff', ''));\r\n for word in tokenizedline:\r\n wordsFile2.append(word);\r\n\r\n # now loop over each line of other file\r\n\r\n for word in wordsFile1:\r\n if word in wordsFile2 and word != ' ': # if word in File 1 is found in File 2 then print it\r\n result.append(word)\r\n\r\n return result", "def read_hansard(train_dir, num_sentences):\n \"\"\"\n # TODO\n # Get starting files from directory:\n # Get file num\n # If file language is english get french with same num, & vice versa\n # load files into two lists: curr_english, curr_french\n # while count < num_sentences:\n # if index >= len(curr_english):\n # load two new files into curr_english and curr_french\n # make sure to keep track of files already read\n # index = 0 \n # sentences['e'][count] = preprocess(curr_english[index])\n # sentences['f'][count] = preprocess(curr_french[index])\n\n #====================================\n # Return (eng, fre) version:\n # Get starting files from directory:\n # Get file num\n # If file language is english get french with same num, & vice versa\n # load files into two lists: curr_english, curr_french\n # while count < num_sentences:\n # if index >= min(len(curr_english), len(curr_french)):\n # load two new files into curr_english and curr_french\n # make sure to keep track of files already read\n # index = 0\n # preprocess and remove SENTSTART and SENTEND from the sentences\n # eng[count] = eng_sentence.split()\n # fre[count] = fre_sentence.split()\n # return (eng, fre)\n \"\"\"\n\n files_examined = set()\n count = 0\n eng = []\n fre = []\n\n # for subdir, dirs, files in os.walk(train_dir):\n # for file in files:\n\n files = os.listdir(train_dir)\n for file in files:\n\n # First set up and validate the files\n file_name, extension = os.path.splitext(file)\n file_name, file_id = os.path.splitext(file_name)\n\n # Skip if not .e or .f file\n if not (extension == '.f' or extension == '.e'):\n continue\n\n # Skip if already examined this file pair\n if file_id in files_examined:\n continue\n\n # Skip if either language file is not available\n eng_file = file_name + file_id + '.e'\n fre_file = file_name + file_id + '.f'\n if eng_file not in files or fre_file not in files:\n continue\n\n # If it reaches here we know we can process it\n files_examined.add(file_id)\n print( \"Reading \" + str(count+1))\n\n # Finally open files and iterate simultaneously\n eng_path = os.path.join(train_dir, eng_file)\n fre_path = os.path.join(train_dir, fre_file)\n with open(eng_path) as english:\n with open(fre_path) as french:\n for E, F in zip(english, french):\n\n # Stop when limit reached\n if count >= num_sentences:\n return (eng, fre)\n\n # Process and split sentences\n E = preprocess(E.rstrip(), 'e')\n F = preprocess(F.rstrip(), 'f')\n\n E_words = E.split()\n F_words = F.split()\n\n eng.append(E_words)\n fre.append(F_words)\n\n count += 1\n\n return (eng, fre)", "def find_what_to_combine():\n import os.path\n index = os.path.join('input', 'en', 'index.txt')\n f = open(index)\n for line in f:\n line = line.strip()\n if line.startswith('section-pages:'): break\n if not line.startswith('section-pages:'):\n raise Exception(\"Could not find a section-pages line in \" + index.txt)\n line = line[14:]\n items = [index]\n for item in line.split(','):\n item = 
item.strip()\n if item: items.append(os.path.join('input', 'en', item+'.txt'))\n return items", "def readData(self, reverse=False):\n print(\"Reading lines...\")\n lines_input = open(self.input_file).read().strip().split('\\n')\n lines_target = open(self.target_file).read().strip().split('\\n')\n lines_input = lines_input[:self.maxdata]\n lines_target = lines_target[:self.maxdata]\n pairs = []\n for x, y in zip(lines_input[:self.maxdata], lines_target[:self.maxdata]):\n pairs.append([self.normalizeString(x), self.normalizeString(y)])\n if reverse:\n pairs = [list(reversed(p)) for p in pairs]\n input_lang = Lang(self.lang2)\n output_lang = Lang(self.lang1)\n else:\n input_lang = Lang(self.lang1)\n output_lang = Lang(self.lang2)\n for pair in pairs:\n input_lang.addSentence(pair[0])\n output_lang.addSentence(pair[1])\n pairs = self.filterPairs(pairs)\n return input_lang, output_lang, pairs", "def load_word_pairs(languages: ty.Tuple[str, str]) -> ty.Dict[str, ty.List[str]]:\n logg = logging.getLogger(f\"c.{__name__}.load_word_pairs\")\n # logg.setLevel(\"DEBUG\")\n logg.debug(\"Start load_word_pairs\")\n\n lang0 = pycountry.languages.get(name=languages[0])\n lang_alpha2_tag0 = lang0.alpha_2\n lang1 = pycountry.languages.get(name=languages[1])\n lang_alpha2_tag1 = lang1.alpha_2\n\n word_pairs_folder = get_package_folders(\"word_pairs\")\n lang_pairs_folder = word_pairs_folder / f\"{lang_alpha2_tag0}_{lang_alpha2_tag1}\"\n\n word_pairs_name_template = f\"{lang_alpha2_tag0}_{lang_alpha2_tag1}_{{}}.json\"\n\n all_word_pairs: ty.Dict[str, ty.List[str]] = {}\n\n for letter in ascii_lowercase:\n word_pairs_name = word_pairs_name_template.format(letter)\n word_pairs_path = lang_pairs_folder / word_pairs_name\n logg.debug(f\"word_pairs_path: {word_pairs_path}\")\n\n word_pairs_letter = json.loads(word_pairs_path.read_text(encoding=\"utf-8\"))\n\n for word0 in word_pairs_letter:\n\n # filter entries with more than one word\n if \" \" in word0:\n continue\n\n # add the whole list to the known dict\n all_word_pairs[word0] = word_pairs_letter[word0]\n\n logg.info(f\"len(all_word_pairs): {len(all_word_pairs)}\")\n\n return all_word_pairs", "def _list_file_pairs(\n prediction_files,\n dataset_dir,\n *,\n split,\n bucket,\n language,\n):\n file_pairs = []\n dataset_path = epath.Path(dataset_dir)\n bucket_path = dataset_path / f'{bucket}_bucket'\n primary_language = language.split('-')[0]\n match_name = f'{primary_language}_{bucket}_{split}_en_{language}'\n for prediction_file in prediction_files:\n for reference_path in bucket_path.iterdir():\n if str(reference_path.name).startswith(match_name):\n file_pairs.append(\n FilePair(\n bucket=bucket,\n prediction_path=prediction_file,\n reference_path=reference_path,\n )\n )\n return file_pairs", "def tokenize_by_language(in_file, out_dir, zipped=False, languages=FT_LANGUAGES):\n if zipped:\n out_files = {\n language: gzip.open(\n '%s/%s.txt.gz' % (out_dir, language), 'wt', encoding='utf-8'\n )\n for language in languages\n }\n else:\n out_files = {\n language: open('%s/%s.txt' % (out_dir, language), 'w', encoding='utf-8')\n for language in languages\n }\n try:\n for line in in_file:\n lang, text = line.rstrip().split('\\t', 1)\n if lang in languages:\n tokenized = tokenize(\n text, lang, include_punctuation=True, external_wordlist=True\n )\n out_file = out_files[lang]\n print(' '.join(tokenized), file=out_file)\n finally:\n for out_file in out_files.values():\n out_file.close()", "def match_word_sorted(code1, code2):\n list1 = code1.split(\" \")\n list2 = 
code2.split(\" \")\n set1 = set(list1)\n set2 = set(list2)\n common_words = set1 & set2\n try:\n common_words.remove(\"\")\n except:\n pass\n\n words_to_index = {}\n for word in common_words:\n in1 = list1.index(word)\n in2 = list2.index(word)\n words_to_index[word] = (in1, in2)\n sorted1 = OrderedDict(sorted(words_to_index.items(), key=lambda t: t[1][0])).keys()\n sorted2 = OrderedDict(sorted(words_to_index.items(), key=lambda t: t[1][1])).keys()\n\n a = Sequence(sorted1)\n b = Sequence(sorted2)\n v = Vocabulary()\n a_encoded = v.encodeSequence(a)\n b_encoded = v.encodeSequence(b)\n scoring = SimpleScoring(MATCH_SCORE, MISMATCH_SCORE)\n aligner = GlobalSequenceAligner(scoring, GAP_SCORE)\n score, encoders = aligner.align(a_encoded, b_encoded, backtrace=True)\n max_score = 0\n for i, encoded in enumerate(encoders):\n alignment = v.decodeSequenceAlignment(encoded)\n if alignment.score > max_score:\n max_score = alignment.score\n return max_score", "def tokenize(doc_list, language_code, core_num=multiprocessing.cpu_count()):\n param = [[d, language_code] for d in doc_list]\n pool = multiprocessing.Pool(core_num)\n return pool.map(_tokenize4map, param)", "def europarl_raw_data(\n data_path='bigdata/training',\n lang1='de-en-german.txt',\n lang2='de-en-english.txt',\n max_train_len=32,\n train_size=1600000,\n val_size=160000,\n):\n lang1_path = os.path.join(data_path, lang1)\n lang2_path = os.path.join(data_path, lang2)\n\n split_data = _train_val_test_split(\n [_read_lines(lang1_path), _read_lines(lang2_path)],\n train_size, val_size\n )\n lang1_train, lang1_val, lang1_test = split_data[0]\n lang2_train, lang2_val, lang2_test = split_data[1]\n lang1_idx2word, lang1_word2idx = _build_vocab_from_sentences(lang1_train)\n lang2_idx2word, lang2_word2idx = _build_vocab_from_sentences(lang2_train)\n lang1_train_vectorized = _convert_sentences_to_ids(\n lang1_train,\n lang1_word2idx\n )\n lang1_val_vectorized = _convert_sentences_to_ids(\n lang1_val,\n lang1_word2idx\n )\n lang1_test_vectorized = _convert_sentences_to_ids(\n lang1_test,\n lang1_word2idx\n )\n lang2_train_vectorized = _convert_sentences_to_ids(\n lang2_train,\n lang2_word2idx\n )\n X_train, y_train = _convert_to_numpy_by_length(\n lang1_train_vectorized,\n lang2_train_vectorized,\n max_train_len,\n )\n X_val = _convert_to_numpy(lang1_val_vectorized)\n X_test = _convert_to_numpy(lang1_test_vectorized)\n return {\n 'vocab': {\n 'lang1_idx2word': lang1_idx2word,\n 'lang1_word2idx': lang1_word2idx,\n 'lang2_idx2word': lang2_idx2word,\n 'lang2_word2idx': lang2_word2idx,\n },\n 'train': {\n 'X': X_train,\n 'y': y_train,\n },\n 'val': {\n 'X': X_val,\n 'y': lang2_val,\n },\n 'test': {\n 'X': X_test,\n 'y': lang2_test,\n },\n }", "def main(directory, csv_file, task_name):\n csv_data = pd.read_csv(csv_file)\n colnames = csv_data.columns.tolist()\n\n edat_files = glob.glob(directory + \"*.edat*\")\n text_files = glob.glob(directory + \"*-*.txt\")\n all_files = edat_files + text_files\n pairs = []\n paired_texts = []\n\n for text_file in text_files:\n [text_fname, _] = os.path.splitext(text_file)\n for edat_file in edat_files:\n [edat_fname, _] = os.path.splitext(edat_file)\n if text_fname == edat_fname:\n pairs.append([text_file, edat_file])\n\n for pair in pairs:\n paired_texts.append(pair[0])\n\n unpaired_texts = list(set(text_files) - set(paired_texts))\n three_files = []\n pop_idx = []\n\n # List of lists\n for i_file in range(len(unpaired_texts)):\n for j_pair in range(len(paired_texts)):\n if 
(unpaired_texts[i_file][:len(unpaired_texts[i_file])-6] in paired_texts[j_pair]):\n three_files.append([paired_texts[j_pair], pairs[j_pair][1],\n unpaired_texts[i_file]])\n pop_idx.append(i_file)\n\n for rm in reversed(pop_idx):\n unpaired_texts.pop(rm)\n\n # three_files is the text files and edats that form a triad (one edat, two\n # similarly named text files).\n for triad in three_files:\n for i_pair in reversed(range(len(pairs))):\n if triad[0:2] == pairs[i_pair]:\n pairs.pop(i_pair)\n\n two_texts = []\n all_two_texts = []\n two_text_pairs = []\n\n for i_file in range(len(unpaired_texts)):\n for j_file in range(i_file + 1, len(unpaired_texts)):\n if (unpaired_texts[i_file][:len(unpaired_texts[i_file])-6] in unpaired_texts[j_file]):\n all_two_texts.append(i_file)\n all_two_texts.append(j_file)\n two_text_pairs.append([i_file, j_file])\n\n all_two_texts = sorted(all_two_texts, reverse=True)\n\n # two_texts is the text files that pair with other text files.\n for i_pair in range(len(two_text_pairs)):\n two_texts.append([unpaired_texts[two_text_pairs[i_pair][0]],\n unpaired_texts[two_text_pairs[i_pair][1]]])\n\n for i_file in all_two_texts:\n unpaired_texts.pop(i_file)\n\n # one_text is the remaining un-paired text files.\n one_text = [[unpaired_texts[i_file]] for i_file in range(len(unpaired_texts))]\n\n # Determine subject IDs and timepoints for all files.\n # Assumes that files will be named according to convention\n # blahblahblah_[subj]-[tp].txt or blahblahblah-[subj]-[tp].txt.\n one_text_subjects = [get_subject(file_[0]) for file_ in one_text]\n one_text_timepoints = [get_timepoint(file_[0]) for file_ in one_text]\n two_text_subjects = [get_subject(pair[0]) for pair in two_texts]\n two_text_timepoints = [get_timepoint(pair[0]) for pair in two_texts]\n three_file_subjects = [get_subject(triad[0]) for triad in three_files]\n three_file_timepoints = [get_timepoint(triad[0]) for triad in three_files]\n pair_subjects = [get_subject(pair[0]) for pair in pairs]\n pair_timepoints = [get_timepoint(pair[0]) for pair in pairs]\n\n af_files = ([item for sublist in pairs for item in sublist] +\n [item for sublist in two_texts for item in sublist] +\n [item for sublist in three_files for item in sublist] +\n [item for sublist in one_text for item in sublist])\n\n one_edat = list(set(all_files) - set(af_files))\n one_edat = [[edat] for edat in one_edat]\n one_edat_subjects = [get_subject(file_[0]) for file_ in one_edat]\n one_edat_timepoints = [get_timepoint(file_[0]) for file_ in one_edat]\n\n all_subjects = (one_text_subjects + two_text_subjects + three_file_subjects +\n pair_subjects + one_edat_subjects)\n all_notetype = (([\"one_text\"] * len(one_text_subjects)) +\n ([\"two_texts\"] * len(two_text_subjects)) +\n ([\"three_files\"] * len(three_file_subjects)) +\n ([\"pair\"] * len(pair_subjects)) +\n ([\"one_edat\"] * len(one_edat_subjects)))\n all_timepoints = (one_text_timepoints + two_text_timepoints +\n three_file_timepoints + pair_timepoints +\n one_edat_timepoints)\n all_file_sets = one_text + two_texts + three_files + pairs + one_edat\n\n organized_dir = org_dir_dict.get(task_name)\n\n for i_subj in range(len(all_subjects)):\n month = timepoint_dict.get(task_name).get(all_timepoints[i_subj])\n files_note = note_dict.get(all_notetype[i_subj])\n if len(all_subjects) > 4:\n try:\n print(\"Successfully organized %s-%s\" % (all_subjects[i_subj], month))\n print(\"Moved:\")\n subject_id = all_subjects[i_subj]\n files = all_file_sets[i_subj]\n note = organize_files(subject_id, month, files, 
organized_dir)\n note.append(files_note)\n orged = 1\n orgedwhen = time.strftime(\"%Y/%m/%d\")\n orgedby = \"PY\"\n except IOError:\n print(\"%s-%s couldn't be organized.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n note = files_note\n orged = 0\n orgedwhen = \"\"\n orgedby = \"\"\n\n try:\n if all_notetype[i_subj] == \"pair\":\n print(\"Successfully converted %s-%s\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 1\n convedwhen = time.strftime(\"%Y/%m/%d\")\n convedby = \"PY\"\n else:\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n except IOError:\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n else:\n print(\"%s-%s couldn't be organized.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n note = files_note\n orged = 0\n orgedwhen = \"\"\n orgedby = \"\"\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n\n csv_data = add_subject(csv_data, all_subjects[i_subj],\n all_timepoints[i_subj], orged, orgedwhen, orgedby,\n conved, convedwhen, convedby, note)\n\n csv_data = csv_data[colnames]\n csv_data.to_csv(csv_file, index=False)", "def process_raw_phrases(file_path):", "def tokenize_file(\n infile, outfile, language, check_language=False, punctuation=False, ftfy=False\n):\n for line in infile:\n if ftfy:\n # Run all ftfy fixes, but don't let it introduce line breaks\n line = fix_text(line.rstrip()).replace('\\n', ' ')\n else:\n # Run only specific quick fixes from ftfy\n line = fix_surrogates(unescape_html(line.rstrip()))\n tokens = tokenize(\n line, language, include_punctuation=punctuation, external_wordlist=True\n )\n checked_lang = None\n if check_language:\n checked_lang, _confidence = detect_language_checked(line.rstrip())\n if (not check_language):\n print(' '.join(tokens), file=outfile)\n else: \n if langcodes.tag_distance(checked_lang, language) < 10:\n print(' '.join(tokens), file=outfile)", "def find_langs(args):\n infile = args.source\n langs = {}\n for line in infile:\n name_and_files = line.split()\n name = name_and_files[0]\n if name not in langs:\n langs[name] = []\n langs[name] += read_files.filter_files(name_and_files[1:])\n langs[args.unknown] += read_files.filter_files(args.classify)\n return langs", "def create_joint_mono_corpus(src_mono_fname, tgt_mono_fname, joint_mono_fname, src_lang, tgt_lang):\n\n with codecs.open(src_mono_fname,'r','utf-8') as srcfile, \\\n codecs.open(tgt_mono_fname,'r','utf-8') as tgtfile, \\\n codecs.open(joint_mono_fname,'w','utf-8') as jointfile : \n\n outlines=[]\n outlines.extend([ l for l in srcfile])\n outlines.extend([ uit.transliterate(l,tgt_lang,src_lang) for l in tgtfile])\n random.shuffle(outlines)\n\n for line in outlines: \n jointfile.write(line)", "def language_text_sources(lang):\n return [\n DATA + \"/tokenized/{source}/{lang}.txt\".format(source=source, lang=lang)\n for source in LANGUAGE_SOURCES[lang]\n if source in FULL_TEXT_SOURCES\n ]", "def file_to_words((filename, enc)):\n STOP_WORDS = {\n 'a', 'an', 'and', 'are', 'as', 'be', 'by', 'for', 'if', 'in',\n 'is', 'it', 'of', 'or', 'py', 'rst', 'that', 'the', 'to', 'with',\n }\n # TR = string.maketrans(string.punctuation, ' ' * len(string.punctuation)) # pour str\n unicode_punctuation_map = dict((ord(char), None) for char in string.punctuation) # pour unicode\n\n 
print(multiprocessing.current_process().name, 'reading', filename)\n output = []\n\n # with open(filename, 'r') as f: # t pour text mode (default), par opposition à b pour binary mode\n # for line in f:\n # line = _line.decode('utf8')\n with codecs.open(filename, 'r', encoding=enc) as f: # pas besoin de spécifier t ou b dans le mode d'ouverture\n for line in f:\n if line.lstrip().startswith('..'): # Skip rst comment lines\n continue\n elif line.lstrip().startswith('#'): # Skip # comment lines\n continue\n # line = line.translate(TR) # Strip punctuation pour string\n line = line.translate(unicode_punctuation_map) # Strip punctuation pour unicode\n for word in line.split():\n word = word.lower()\n if word.isalpha() and word not in STOP_WORDS:\n output.append((word, 1))\n return output", "def collate(filename):\r\n x=open(filename,\"r\")\r\n total_words=[]\r\n for line in x:\r\n line=line.strip(\"\\n\")\r\n line=line.split(\":\")\r\n if len(total_words)<1:\r\n total_words.append(line)\r\n else:\r\n x= len(total_words)\r\n if line[0] == total_words[x-1][0]:\r\n if int(line[1]) > int(total_words[x-1][len(total_words[x-1])-1]):\r\n total_words[x-1].append(line[1])\r\n else:\r\n total_words.append(line)\r\n y = open(\"collated_ids.txt\", \"w\")\r\n # for i in range(len(total_words)):\r\n # if len(total_words[i])<3:\r\n # total_words[i]=\":\".join(total_words[i])+\"\\n\"\r\n # else:\r\n # id=\" \".join(total_words[i][1:])\r\n # total_words[i]=total_words[i][0]+\":\"+id+\"\\n\"\r\n # y.writelines(total_words)\r\n for i in range(len(total_words)):\r\n id=\"\"\r\n for j in range(1,len(total_words[i])):\r\n id=id +total_words[i][j] +\" \"\r\n y.write(str(total_words[i][0]) + \":\" +str(id) + \"\\n\")", "def find_best_locale_match(locale, langcodes):\n\n score_map = { \"language\" : 1000,\n \"territory\": 100,\n \"script\" : 10,\n \"encoding\" : 1 }\n\n def get_match_score(locale, langcode):\n score = 0\n\n locale_parts = parse_langcode(locale)\n langcode_parts = parse_langcode(langcode)\n if not locale_parts or not langcode_parts:\n return score\n\n for part, part_score in score_map.iteritems():\n if locale_parts[part] and langcode_parts[part]:\n if locale_parts[part] == langcode_parts[part]:\n # match\n score += part_score\n else:\n # not match\n score -= part_score\n elif langcode_parts[part] and not locale_parts[part]:\n # langcode has something the locale doesn't have\n score -= part_score\n\n return score\n\n scores = []\n\n # get score for each langcode\n for langcode in langcodes:\n scores.append((langcode, get_match_score(locale, langcode)))\n\n # find the best one\n sorted_langcodes = sorted(scores, key=lambda item_score: item_score[1], reverse=True)\n\n # matches matching only script or encoding or both are not useful\n if sorted_langcodes and sorted_langcodes[0][1] > score_map[\"territory\"]:\n return sorted_langcodes[0][0]\n else:\n return None", "def contador(filepath: str = sys.argv[1], numero: int = 100):\n \n # extraemos el texto\n with open(filepath, \"r\", encoding = \"utf-8\") as file:\n texto = file.read()\n \n # creamos el documento tokenizado con spacy\n doc = nlp(texto)\n \n # definimos parametros \n etiquetas_de_ruido = [\"PROPN\", # nombre propio\n \"SPACE\", # espacio\n \"PUNCT\", # punctuación\n \"CONJ\", # conjugación\n \"AUX\", # auxiliar\n \"ADP\", # adposición (preposición ó posposición)\n \"ADV\", # adverbio\n \"DET\", # determinante\n \"INTJ\", # interjección\n \"SCONJ\", # conjunción subordinada\n \"PRON\", # pronombre\n \"X\", # otro\n ] \n minimo_de_caracteres = 
2\n\n # \n def esRuido(token):\n \"\"\"\n Esta función define si una palabra (o token) es ruido o no.\n \"\"\"\n es_ruido = False\n if token.pos_ in etiquetas_de_ruido:\n es_ruido = True \n elif token.is_stop == True:\n es_ruido = True\n elif len(token.string) <= minimo_de_caracteres:\n es_ruido = True\n return es_ruido \n def limpiador(token, minuscula = True):\n if minuscula:\n token = token.lower()\n return token.strip()\n\n # contador\n cuenta_limpia = [limpiador(palabra.lemma_) for palabra in doc if not esRuido(palabra)]\n\n top_100 = Counter(cuenta_limpia).most_common(numero)\n \n # salvar el archivo limpio \n path, filename = os.path.split(filepath)\n parent_dir, data_dir = os.path.split(path)\n filename, extension = os.path.splitext(filename)\n if \"-\" in filename:\n filename = filename.split(\"-\")[0]\n filepath_out = os.path.join(parent_dir, \"processed\", f\"{filename}-top{numero}.csv\")\n\n with open(filepath_out, \"w\", encoding = \"utf-8\",) as file:\n file.write(\"palabra,cuenta\\n\")\n for i in range(numero):\n file.write(f\"{top_100[i][0]},{top_100[i][1]}\\n\")\n \n return filepath_out, filename", "def readNormalizer(language):\n\n encoding = None\n\n fname = os.path.join(nm_dir, '%s.txt' % language) \n if not os.path.exists(fname):\n return []\n\n lst = []\n for l in open(fname): \n if not l.strip(): continue\n\n mo = enc_reg.match(l)\n if mo:\n encoding= mo.group(1)\n continue\n\n if l.startswith('#'): continue\n\n fields = l.split()\n if len(fields) == 1:\n fields = (fields[0], '') # replace XX with ''\n\n k = unicode(fields[0], encoding) \n v = unicode(fields[1], encoding) \n\n lst.append((k, v))\n\n return lst", "def convert_to_t5_format(nlp, texts):\n\n inputs = []\n outputs = []\n original_texts = []\n\n for text, doc in zip(texts, nlp.pipe(texts, n_process=-1)):\n\n pairs = set()\n\n for chunk in doc.noun_chunks:\n if chunk.text == text:\n continue\n input_ = text[0 : chunk.start_char] + \"<extra_id_0> \" + text[chunk.end_char + 1 :]\n output = \"<extra_id_0> \" + chunk.text + \" <extra_id_1> </s>\"\n\n pairs.add((input_.strip(), output))\n\n for token in doc:\n left_edge_i = token.left_edge.i\n right_edge_i = token.right_edge.i\n chunk_length = right_edge_i - left_edge_i + 1\n if chunk_length / len(doc) > 0.5 or chunk_length > 10: # if chunk is too long, just skip it\n continue\n\n input_ = str(doc[:left_edge_i]) + \" <extra_id_0> \" + str(doc[right_edge_i + 1 :])\n output = \"<extra_id_0> \" + str(doc[left_edge_i : right_edge_i + 1]) + \" <extra_id_1> </s>\"\n\n pairs.add((input_.strip(), output))\n\n for token in doc:\n if token.pos_ in [\"NOUN\", \"PRON\", \"PROPN\"]: # we don't want to mask parts of noun chunks\n continue\n input_ = str(doc[: token.i]) + \" <extra_id_0> \" + str(doc[token.i + 1 :])\n output = \"<extra_id_0> \" + token.text + \" <extra_id_1> </s>\"\n\n pairs.add((input_.strip(), output))\n\n for (input_, output) in pairs:\n inputs.append(input_)\n outputs.append(output)\n original_texts.append(text)\n\n return inputs, outputs, original_texts", "def read_language(filehandle):\n from bs4 import BeautifulSoup as bs\n soup = bs(filehandle.read())\n primary_name = soup.find(\"meta\", property=\"og:title\")[\"content\"]\n alternate_names = soup.find(\"div\", class_=\"field-name-field-alternate-names\" ).find(\"div\", class_=[\"field-item\", \"even\"]).string.split(\", \")\n classification = soup.find(\"div\", class_=\"field-name-language-classification-link\").find(\"div\", class_=[\"field-item\", \"even\"]).string.split(\", \")\n dialects = 
soup.find(\"div\", class_=\"field-name-field-dialects\" ).find(\"div\", class_=[\"field-item\", \"even\"]).p.get_text()\n return ([unicode(primary_name)]+alternate_names, classification, dialects)", "def make_lexical_analysis (file_name_input: str, file_name_output: str) -> str:\n\n #open and read lines of input file (p-- code)\n file_input = open(file_name_input, 'r')\n lines = file_input.readlines()\n\n is_comment = False\n has_2_characters = False\n counter_lines = 1\n counter_errors = 0\n counter_characters = 0\n table_tokens = []\n lexical_message = \"\"\n lexical_error = \"\"\n final_word = \"\"\n\n\t#get each line\n for line in lines:\n\n #get words in a line\n words_in_line = line.strip().split(' ')\n\n #get sentences for words in a line\n for sentence in words_in_line:\n\n counter_characters = 0\n\n #get character in a sentence\n for character in sentence:\n\t #current character is a puctuation word?\n if (character in '=,;()+-/*' and is_comment==False and has_2_characters==False) or \\\n (character in ':<>' and is_comment==False and has_2_characters==False) or \\\n (is_comment==False and has_2_characters==True):\n\n lexical_message, counter_errors, has_2_characters, final_word, table_tokens, counter_characters = check_one_or_two_characters (character, final_word, sentence, counter_characters, counter_lines,\n counter_errors,is_comment, has_2_characters, lexical_message, table_tokens)\n\n\t #current final_word is 'end' and current character is '.' (i.e. end of p-- code)?\n elif (is_comment==False) and (final_word == 'end' and character == '.'):\n lexical_message, counter_errors, final_word, table_tokens = check_end_program (character, final_word, counter_errors, counter_lines, lexical_message, table_tokens)\n\n\t #check if the comment starts or ends\n elif ((is_comment == False) and (character == '{')) or ((is_comment == True) and (character == '}')):\n is_comment, line_comment = check_comments (character, is_comment, counter_lines)\n\n\t #final word is probably an identifier, a reserved word, an integer number or a real number!!!\n elif is_comment == False:\n final_word = final_word + character\n\n if final_word != \"\":\n #recognize the final_word and added to table\n lexical_message, counter_errors = add_final_word_to_table (table_tokens, lexical_message, final_word, counter_lines, counter_errors)\n final_word = \"\"\n\n\t #analyse next line\n counter_lines = counter_lines + 1\n\n\t#the comment was opened but not closed?\n if is_comment == True:\n lexical_message = lexical_message + final_word + ',' + get_message_lexical_error (4, counter_lines-1)\n counter_errors += 1\n\n\t#open output file and write lexical message\n \n file_output = open(file_name_output, 'w')\n file_output.write(lexical_errors)\n \n\n file_output.close()\n file_input.close()\n return table_tokens", "def read_file():\n\n file = input(\"Input file: \")\n\n try:\n pfile = open(file)\n except: # Checks if the file can be opened\n print(\"ERROR: Could not open file \" + file)\n sys.exit()\n\n pfile = pfile.readlines()\n\n preorder = pfile[0].split()\n inorder = pfile[1].split()\n encode = pfile[2].strip()\n\n return preorder, inorder, encode", "def write_complete_data():\n for file_name in os.listdir(SOURCE_PATH):\n if file_name in [\"root.json\", \"es-419.json\"]:\n # \"root\" is not a language, \"es-419\" doesn't contain spell-out rules\n continue\n full_source_path = os.path.join(SOURCE_PATH, file_name)\n full_target_path = os.path.join(TARGET_PATH, file_name.split(\".\")[0] + \".py\")\n 
full_supplementary_path = os.path.join(SUPPLEMENTARY_PATH, file_name)\n\n language_data = {key: {} for key in REQUIRED_NUMBERS_DATA}\n ordered_language_data = {key: {} for key in REQUIRED_NUMBERS_DATA}\n with open(full_source_path, \"r\") as source:\n data = json.load(source)\n try:\n requisite_data = data[\"rbnf\"][\"rbnf\"][\"SpelloutRules\"]\n except KeyError:\n logging.error(\n f\"\\\"['rbnf']['rbnf']['SpelloutRules']\\\" doesn't exist in {file_name}\"\n )\n continue\n\n for keys, vals in requisite_data.items():\n if _is_valid(keys):\n for key, val in vals.items():\n # Removing soft-hyphens from the source file.\n val = val.replace(\"\\xad\", \"\")\n _extract_information(key, val, language_data)\n\n with open(full_supplementary_path, \"r\") as supplementary_data:\n data = json.load(supplementary_data)\n for keys in REQUIRED_NUMBERS_DATA:\n language_data[keys].update(data[keys])\n sorted_tuples = sorted(\n language_data[keys].items(), key=lambda x: (x[1], x[0])\n )\n for items in sorted_tuples:\n word, number = items[0], items[1]\n ordered_language_data[keys][word] = int(number)\n ordered_language_data[\"SKIP_TOKENS\"] = sorted(data[\"SKIP_TOKENS\"])\n try:\n ordered_language_data[\"USE_LONG_SCALE\"] = data[\"USE_LONG_SCALE\"]\n except KeyError:\n logging.error(f\"long_scale information missing in {file_name}\")\n\n translation_data = json.dumps(\n ordered_language_data, indent=4, ensure_ascii=False\n )\n # Overwriting boolean value with capitalized form\n translation_data = re.sub(r\"\\bfalse\\b\", \"False\", translation_data)\n translation_data = re.sub(r\"\\btrue\\b\", \"True\", translation_data)\n out_text = \"info = \" + translation_data + \"\\n\"\n with open(full_target_path, \"w+\") as target_file:\n target_file.write(out_text)", "def learn(filename):\n word_dict = {} # Create empty dictionary\n first = None\n prev = None\n with open(filename, 'r', encoding='utf-8') as file:\n for line in file:\n list_words = line.lower().split()\n text = []\n for word in list_words:\n # take out leading and trailing punctuation characters\n words = word.strip(string.punctuation + string.digits)\n word_len = len(words)\n if word_len >= 1:\n text.append(words)\n\n if first is None:\n # Get the first word in the text file\n first = text[0]\n # iterate over text\n if prev:\n text.insert(0, prev)\n for counter, word in enumerate(text):\n if word not in word_dict:\n word_dict[word] = list()\n if counter < (len(text) - 1):\n following = counter + 1\n word_dict[word].append(text[following])\n prev = text[-1]\n return first, word_dict # return a tuple" ]
[ "0.55892974", "0.5528118", "0.5515096", "0.54099923", "0.5397135", "0.537212", "0.5364681", "0.53629816", "0.5347864", "0.5310293", "0.52782893", "0.5263946", "0.5245933", "0.52381843", "0.5238012", "0.5229207", "0.52261126", "0.5188704", "0.518311", "0.51773405", "0.5173565", "0.51683384", "0.51594627", "0.5159281", "0.515118", "0.51389116", "0.5121148", "0.51108843", "0.5107462", "0.51004624" ]
0.76984686
0
Get trigger by trigger id and start git workflow associated.
def trigger_workflow(self, trigger_id, commit_sha='', status_url=None, collab_url=None): # Note: self.context will be None at this point as this is a # non-authenticated request. db_obj = objects.registry.Assembly.get_by_trigger_id(None, trigger_id) try: # get the trust\impersonation context and authenticate it. self.context = keystone_utils.create_delegation_context( db_obj, self.context) except exception.AuthorizationFailure as auth_ex: LOG.warning(auth_ex) return plan_obj = objects.registry.Plan.get_by_id(self.context, db_obj.plan_id) artifacts = plan_obj.raw_content.get('artifacts', []) for arti in artifacts: if repo_utils.verify_artifact(arti, collab_url): self._build_artifact(assem=db_obj, artifact=arti, commit_sha=commit_sha, status_url=status_url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_trigger_by_id(self, trigger_id):\n return self.triggers[trigger_id]", "def fetch_by_id(self, trigger_id):\n result = self._client.get(self._full_path(trigger_id + '/state'))\n if 'state' in result:\n trigger = self._client.get(self._full_path(trigger_id))\n return Trigger(self._client, **trigger)\n elif not 'trigger_id' in result:\n raise ResponseStructureError(\"invalid api response\", result)", "def trigger_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"trigger_id\")", "def get_trigger(uuid: UUID, trigger_index: int) -> Optional['Trigger']:\n scenario = store.get_scenario(uuid)\n if scenario and trigger_index < len(scenario.trigger_manager.triggers):\n return scenario.trigger_manager.triggers[trigger_index]\n return None", "def create_trigger_task(\n main_dag: models.DAG,\n trigger_dag_id: str) -> dagrun_operator.TriggerDagRunOperator:\n return dagrun_operator.TriggerDagRunOperator(\n task_id=f'trigger-{trigger_dag_id}',\n trigger_dag_id=trigger_dag_id,\n dag=main_dag)", "def get_single_trigger(self, trigger_id, full=False):\n if full:\n path = \"triggers/trigger/{}\".format(trigger_id)\n else:\n path = \"triggers/{}\".format(trigger_id)\n entity = self._get(path=path)\n if not entity:\n return None\n trigger = self._convert_trigger(entity)\n c_entities = entity.get('conditions', None)\n if c_entities:\n for c_entity in c_entities:\n trigger.conditions.append(self._convert_condition(c_entity))\n d_entities = entity.get('dampenings', None)\n if d_entities:\n for d_entity in d_entities:\n trigger.dampenings.append(self._convert_dampening(d_entity))\n return trigger", "def fire_trigger(self, trigger):\n if not self.exists():\n return\n if trigger in self.events:\n for action in self.events[trigger]:\n action(requestor=self)", "def createTrigger(self):\n return _libsbml.Model_createTrigger(self)", "def trigger_id(self, trigger_id):\n\n self._trigger_id = trigger_id", "def trigger_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"trigger_id\")", "def build_trigger(ctx, build_type_id, branch, comment, parameter, agent_id,\n open_build_log, wait_for_run):\n parameters = dict([p.split('=', 1) for p in parameter])\n data = ctx.obj.trigger_build(\n build_type_id=build_type_id,\n branch=branch,\n comment=comment,\n parameters=parameters,\n agent_id=agent_id)\n build_id = data['id']\n ctx.invoke(build_queue_show, args=[build_id])\n if open_build_log:\n url = data['webUrl'] + '&tab=buildLog'\n webbrowser.open(url)\n if not wait_for_run:\n return\n while data['state'] == 'queued':\n data = ctx.obj.get_queued_build_by_build_id(build_id)\n click.echo('state: %s' % data['state'])\n time.sleep(1)\n ctx.invoke(build_queue_show, args=[build_id])", "def trigger(self, cmd: str, requestor: Identifier, extra_context=None) -> Optional[Flow]:\n flow, next_step = self.check_inflight_flow_triggered(cmd, requestor)\n if not flow:\n flow, next_step = self._check_if_new_flow_is_triggered(cmd, requestor)\n if not flow:\n return None\n\n flow.advance(next_step, enforce_predicate=False)\n if extra_context:\n flow.ctx = dict(extra_context)\n self._enqueue_flow(flow)\n return flow", "def getTrigger(self, *args):\n return _libsbml.Event_getTrigger(self, *args)", "def get_callback_trigger(callback_context):\n if not callback_context.triggered:\n trigger_id = None\n else:\n trigger_id = callback_context.triggered[0][\"prop_id\"].split(\".\")[0]\n\n return trigger_id", "def Trigger(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('trigger', payload=payload, 
response_object=None)", "def createTrigger(self):\n return _libsbml.Event_createTrigger(self)", "def trigger(self, sid):\r\n return Trigger(self, sid)", "def trigger(self):\n index = self._ordered_input_names.index('trigger')\n return self._inputs[index]", "def configure_trigger(i, cam, trigger):\n\n\tprint('\\n\\t*** CONFIGURING TRIGGER ***')\n\ttry:\n\t\tresult = True\n\n\t\t# Ensure trigger mode off\n\t\t# The trigger must be disabled in order to configure whether the source\n\t\t# is software or hardware.\n\t\tif cam.TriggerMode.GetAccessMode() != PySpin.RW:\n\t\t\tprint('Unable to disable trigger mode (node retrieval). Aborting...')\n\t\t\treturn False\n\n\t\tcam.TriggerMode.SetValue(PySpin.TriggerMode_Off)\n\n\t\tprint('\\t\\tCamera {} Trigger mode disabled...'.format(i))\n\n\t\t# Select trigger source\n\t\t# The trigger source must be set to hardware or software while trigger\n\t\t# mode is off.\n\t\tif cam.TriggerSource.GetAccessMode() != PySpin.RW:\n\t\t\tprint('Unable to get trigger source (node retrieval). Aborting...')\n\t\t\treturn False\n\n\t\tif trigger == 'software':\n\t\t\tcam.TriggerSource.SetValue(PySpin.TriggerSource_Software)\n\t\telif trigger == 'hardware':\n\t\t\tcam.TriggerSource.SetValue(PySpin.TriggerSource_Line3)\n\t\t\t# result &= trigger_selector(cam, 'FrameStart')\n\t\t\tresult &= trigger_overlap(2, cam, 'ReadOut')\n\n\t\tprint('\\t\\tCamera {} trigger source set to {}...'.format(i, trigger))\n\n\t\t# Turn trigger mode on\n\t\t# Once the appropriate trigger source has been set, turn trigger mode\n\t\t# on in order to retrieve images using the trigger.\n\t\tcam.TriggerMode.SetValue(PySpin.TriggerMode_On)\n\t\tprint('\\t\\tCamera {} Trigger mode turned back on...'.format(i))\n\n\texcept PySpin.SpinnakerException as ex:\n\t\tprint('Error: %s' % ex)\n\t\treturn False\n\n\treturn result", "def _get_trigger(self, cursor):\n raise NotImplementedError", "def open_modal(trigger_id, blocks):\n\n if not settings.SLACK_TOKEN:\n return None\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.views_open(trigger_id=trigger_id, view=blocks)\n assert response['ok'] is True\n return response['view']['id']\n except SlackApiError as e:\n assert e.response['ok'] is False\n return None", "def thread_trigger_id(self) -> str | None:\n trigger_id = None\n if hasattr(threading.current_thread(), 'trigger_id'):\n trigger_id = threading.current_thread().trigger_id # type: ignore\n return trigger_id", "def findPluginFromTrigger(self, trigger):\n\t\ttrigger = trigger.lower() # lowercase!\n\t\t\n\t\t# Loop through all plugins.\n\t\tfor plugin_name in self.plugins:\n\t\t\tplugin = self.getPlugin(plugin_name)\n\t\t\t\n\t\t\t# Check if the plugin has that trigger.\n\t\t\tif plugin.hasCommand(trigger):\n\t\t\t\treturn plugin_name\n\t\t\n\t\t# Not found :(\n\t\treturn None", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Trigger':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = TriggerArgs.__new__(TriggerArgs)\n\n __props__.__dict__[\"channel\"] = None\n __props__.__dict__[\"conditions\"] = None\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"destination\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"event_data_content_type\"] = None\n __props__.__dict__[\"event_filters\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n 
__props__.__dict__[\"project\"] = None\n __props__.__dict__[\"service_account\"] = None\n __props__.__dict__[\"transport\"] = None\n __props__.__dict__[\"trigger_id\"] = None\n __props__.__dict__[\"uid\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Trigger(resource_name, opts=opts, __props__=__props__)", "def change_trigger(self, changes_dict):\n if changes_dict.get('deleted', False):\n #override deleted behavior on consumers that care/deal with deletions\n return None\n return self.couch_db.open_doc(changes_dict['id'])", "def trigger(self, journey_id, step_id, data):\n self.journey_id = journey_id\n self.step_id = step_id\n if 'email_address' not in data:\n raise KeyError('The automation email queue must have an email_address')\n\n check_email(data['email_address'])\n response = self._mc_client._post(\n url=self._build_path(\"journeys\", journey_id, 'steps', step_id, 'actions', \"trigger\"),\n data=data\n )\n\n return response", "def task_trigger(self, args):\n h, tmp = tempfile.mkstemp(\n dir=self._tmpdir, prefix='trigger_raw', suffix='.json')\n os.close(h)\n cmd = [\n '-user',\n 'joe@localhost',\n '-d',\n 'pool=default',\n '-dump-json',\n tmp,\n ]\n cmd.extend(args)\n assert not self._run_swarming('trigger',\n cmd), 'Failed to trigger a task. cmd=%s' % cmd\n with open(tmp, 'rb') as f:\n data = json.load(f)\n task_id = data['tasks'][0]['task_id']\n logging.debug('task_id = %s', task_id)\n return task_id", "def set_TriggerID(self, value):\n super(UpdateTriggerInputSet, self)._set_input('TriggerID', value)", "def auto_trigger_selection(self):\n LGR.info(\"Running automatic trigger detection.\")\n LGR.info(\"Matching channel names with known trigger names first.\")\n joint_match = \"§\".join(TRIGGER_NAMES)\n indexes = []\n for n, case in enumerate(self.ch_name):\n name = re.split(r\"(\\W+|\\d|_|\\s)\", case)\n name = list(filter(None, name))\n\n if re.search(\"|\".join(name), joint_match, re.IGNORECASE):\n indexes = indexes + [n]\n\n if indexes:\n if len(indexes) > 1:\n raise Exception(\n \"More than one possible trigger channel was automatically found. 
\"\n \"Please run phys2bids specifying the -chtrig argument.\"\n )\n else:\n self.trigger_idx = int(indexes[0])\n else:\n # Time-domain automatic trigger detection\n LGR.info(\"Find the trigger channel by measuring data distance from its value limits.\")\n # Create numpy array with all channels (excluding time)\n channel_ts = np.array(self.timeseries[1:])\n\n # Normalize each signal to [0,1]\n min_ts = np.min(channel_ts, axis=1)[:, None]\n max_ts = np.max(channel_ts, axis=1)[:, None]\n channel_ts = (channel_ts - min_ts) / (max_ts - min_ts)\n\n # Compute distance to the closest signal limit (0 or 1)\n distance = np.minimum(abs(channel_ts - 0), abs(channel_ts - 1))\n distance_mean = np.mean(distance, axis=1)\n\n # Set the trigger as the channel with the smallest distance\n self.trigger_idx = int(np.nanargmin(distance_mean) + 1)\n\n LGR.info(f\"{self.ch_name[self.trigger_idx]} selected as trigger channel\")", "def _determine_trigger_objective(revision, buildername):\n builder_to_trigger = None\n files = None\n repo_name = query_repo_name_from_buildername(buildername)\n\n build_buildername = determine_upstream_builder(buildername)\n\n assert valid_builder(build_buildername), \\\n \"Our platforms mapping system has failed.\"\n\n if build_buildername == buildername:\n # For a build job we know that we don't need files to\n # trigger it and it's the build job we want to trigger\n return build_buildername, None\n\n # Let's figure out which jobs are associated to such revision\n all_jobs = query_jobs(repo_name, revision)\n # Let's only look at jobs that match such build_buildername\n build_jobs = _matching_jobs(build_buildername, all_jobs)\n\n # We need to determine if we need to trigger a build job\n # or the test job\n working_job = None\n running_job = None\n failed_job = None\n\n LOG.debug(\"List of matching jobs:\")\n for job in build_jobs:\n # Successful, running and failed jobs may have the files we need\n files = _find_files(job)\n if files != [] and _all_urls_reachable(files):\n working_job = job\n break\n else:\n LOG.debug(\"We can't determine the files for this build or \"\n \"can't reach them.\")\n files = None\n\n try:\n status = buildapi.query_job_status(job)\n except buildjson.BuildjsonException:\n LOG.debug(\"We have hit bug 1159279 and have to work around it. We will pretend that \"\n \"we could not reach the files for it.\")\n continue\n\n if status == buildapi.RUNNING:\n LOG.debug(\"We found a running build job. We don't search anymore.\")\n running_job = job\n\n else:\n LOG.info(\"We found a job that finished but its status \"\n \"is not successful. status: %d\" % status)\n failed_job = job\n\n if working_job:\n # We found a build job with the necessary files. It could be a\n # successful job, a running job that already emitted files or a\n # testfailed job\n LOG.debug(str(working_job))\n LOG.info(\"We have the necessary files to trigger the downstream job.\")\n # We have the files needed to trigger the test job\n builder_to_trigger = buildername\n elif running_job:\n LOG.info(\"We found a running build job without files. We will not trigger another one. \"\n \"You have to run the script again after the build job is finished to trigger %s.\" %\n buildername)\n builder_to_trigger = None\n elif failed_job:\n LOG.info(\"The build job %s failed on revision %s without generating the necessary files. 
\"\n \"We will not trigger anything.\" % (build_buildername, revision))\n builder_to_trigger = None\n else:\n # We were trying to build a test job, however, we determined\n # that we need an upstream builder instead\n if not _unique_build_request(build_buildername, revision):\n # This is a safeguard to prevent triggering a build\n # job multiple times if it is not intentional\n builder_to_trigger = None\n else:\n LOG.info(\"We will trigger 1) '%s' instead of 2) '%s'\" % (build_buildername, buildername))\n LOG.info(\"We need to trigger the build job once (1) in order to be able to run the test job (2).\")\n if repo_name == 'try':\n LOG.info(\"You'll need to run the script again after (1) is done to trigger (2).\")\n else:\n LOG.info(\"After (1) is done every test job associated with it will be triggered.\")\n builder_to_trigger = build_buildername\n\n return builder_to_trigger, files" ]
[ "0.6945729", "0.62667257", "0.5965356", "0.5943977", "0.5858999", "0.5767159", "0.5696369", "0.56085205", "0.55933714", "0.55431855", "0.5526502", "0.5483214", "0.54804486", "0.5407957", "0.5399822", "0.53422755", "0.5314913", "0.5291426", "0.5261431", "0.51868975", "0.51251394", "0.5093626", "0.50327325", "0.4967247", "0.49656218", "0.49561438", "0.49440536", "0.49381924", "0.49368313", "0.49315304" ]
0.6352503
1
Return all assemblies, based on the query provided.
def get_all(self): return objects.registry.AssemblyList.get_all(self.context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def package_all(q):\n\n query = (q.dict_of_lists())[\"q\"][0]\n datasets = p.toolkit.get_action(\"package_search\")(\n {}, data_dict={\"q\": query, \"include_private\": True}\n )\n\n result = datasets[\"results\"]\n results = []\n for res in result:\n results.append(res)\n return results", "def getAssembliesOfType(self, typeSpec, exactMatch=False):\n return self.getChildrenWithFlags(typeSpec, exactMatch=exactMatch)", "def find(self, datastore, kwargs):\n query = self.assembly_query(datastore, kwargs)\n return query.all()", "def getAssemblies(\n self,\n typeSpec=None,\n sortKey=None,\n includeBolAssems=False,\n includeSFP=False,\n includeAll=False,\n zones=None,\n exact=False,\n ):\n if includeAll:\n includeBolAssems = includeSFP = True\n\n assems = []\n if (\n includeBolAssems\n and self.parent is not None\n and self.parent.blueprints is not None\n ):\n assems.extend(self.parent.blueprints.assemblies.values())\n\n assems.extend(a for a in sorted(self, key=sortKey))\n\n if includeSFP and self.parent is not None and self.parent.sfp is not None:\n assems.extend(self.parent.sfp.getChildren())\n\n if typeSpec:\n assems = [a for a in assems if a.hasFlags(typeSpec, exact=exact)]\n\n if zones:\n zoneLocs = self.zones.getZoneLocations(zones)\n assems = [a for a in assems if a.getLocation() in zoneLocs]\n\n return assems", "def list_assemblies(self):\n # Get remarks listing available assemblies\n remark_lines = self.get_remark(300)\n if remark_lines is None:\n raise InvalidFileError(\n \"File does not contain assembly information (REMARK 300)\"\n )\n return [\n assembly_id.strip() \n for assembly_id in remark_lines[0][12:].split(\",\")\n ]", "def get_assemblies(term, download=True, path='assemblies'):\n\n from Bio import Entrez\n #provide your own mail here\n Entrez.email = \"[email protected]\"\n handle = Entrez.esearch(db=\"assembly\", term=term, retmax='200')\n record = Entrez.read(handle)\n ids = record['IdList']\n print (f'found {len(ids)} ids')\n links = []\n for id in ids:\n #get summary\n summary = get_assembly_summary(id)\n #get ftp link\n url = summary['DocumentSummarySet']['DocumentSummary'][0]['FtpPath_RefSeq']\n if url == '':\n continue\n label = os.path.basename(url)\n #get the fasta link - change this to get other formats\n link = os.path.join(url,label+'_genomic.fna.gz')\n print (link)\n links.append(link)\n if download == True:\n #download link\n urllib.request.urlretrieve(link, f'{label}.fna.gz')\n return links", "def get_all(class_name):\n result = class_name.query.all()\n return result", "def list_assemblies(cls) -> list:\n# return [cls.CONSENSUS_FILE, cls.CONTIGS_FILE, cls.SCAFFOLDS_FILE];\n return [cls.CONSENSUS, cls.CONTIGS, cls.SCAFFOLDS];", "def get_catalogs_by_query(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinQuerySession.get_bins_by_query_template\n return self._get_provider_session('catalog_query_session').get_catalogs_by_query(*args, **kwargs)", "def getAssemblies(pth):\n if pth.lower().endswith(\".manifest\"):\n return []\n # check for manifest file\n manifestnm = pth + \".manifest\"\n if os.path.isfile(manifestnm):\n with open(manifestnm, \"rb\") as fd:\n res = {RT_MANIFEST: {1: {0: fd.read()}}}\n else:\n # check the binary for embedded manifest\n try:\n res = GetManifestResources(pth)\n except winresource.pywintypes.error as exc:\n if exc.args[0] == winresource.ERROR_BAD_EXE_FORMAT:\n logger.info('Cannot get manifest resource from non-PE '\n 'file %s', pth)\n return []\n raise\n rv = []\n if RT_MANIFEST in res and 
len(res[RT_MANIFEST]):\n for name in res[RT_MANIFEST]:\n for language in res[RT_MANIFEST][name]:\n # check the manifest for dependent assemblies\n try:\n manifest = Manifest()\n manifest.filename = \":\".join([pth, str(RT_MANIFEST),\n str(name), str(language)])\n manifest.parse_string(res[RT_MANIFEST][name][language],\n False)\n except Exception as exc:\n logger.error(\"Can not parse manifest resource %s, %s\"\n \" from %s\", name, language, pth, exc_info=1)\n else:\n if manifest.dependentAssemblies:\n logger.debug(\"Dependent assemblies of %s:\", pth)\n logger.debug(\", \".join([assembly.getid()\n for assembly in\n manifest.dependentAssemblies]))\n rv.extend(manifest.dependentAssemblies)\n return rv", "def _getAssembliesByName(self):\n runLog.extra(\"Generating assemblies-by-name map.\")\n\n # NOTE: eliminated unnecessary repeated lookups in self for self.assembliesByName\n self.assembliesByName = assymap = {}\n # don't includeAll b/c detailed ones are not ready yet\n for assem in self.getAssemblies(includeBolAssems=True, includeSFP=True):\n aName = assem.getName()\n if aName in assymap and assymap[aName] != assem:\n # dangerous situation that can occur in restart runs where the global assemNum isn't updated.\n # !=assem clause added because sometimes an assem is in one of the includeAll lists that is also in the\n # core and that's ok.\n runLog.error(\n \"Two (or more) assemblies in the reactor (and associated lists) have the name {0},\\n\"\n \"including {1} and {2}.\".format(aName, assem, assymap[aName])\n )\n raise RuntimeError(\"Assembly name collision.\")\n\n assymap[aName] = assem", "def refine_assemblies(self, assemblies):\n refined_assemblies = []\n\n for species in self.species_list:\n scientific_name, common_name, taxid = species\n if scientific_name not in assemblies:\n continue\n asms = assemblies[scientific_name]\n for asm in asms:\n (assembly_name, accession, release_date) = asm\n row = [\n scientific_name,\n common_name,\n taxid,\n assembly_name,\n accession,\n release_date,\n ]\n refined_assemblies.append(row)\n\n return refined_assemblies", "def query_all(self):\n return multisearch.queries.QueryAll().connect(self)", "def search_catalog(self, query):\n scope = datacatalog.SearchCatalogRequest.Scope()\n scope.include_project_ids.append(self.__project_id)\n\n request = datacatalog.SearchCatalogRequest()\n request.scope = scope\n request.query = query\n request.page_size = 1000\n\n return [\n result for result in self.__datacatalog.search_catalog(request)\n ]", "def get_all_ads(query, sort_by='date', by_title=False, with_images=False, owner=None):\n search_url = generate_search_url(query, sort_by, by_title, with_images, owner)\n for page, page_number in get_pages(search_url):\n for ad in get_ads_from_page(page):\n yield agregate_ad_info(ad, page_number)", "def create_all_folders_query():\n qry_text = \"<Where><Eq><FieldRef Name=\\\"FSObjType\\\" /><Value Type=\\\"Integer\\\">1</Value></Eq></Where>\"\n return CamlQuery.parse(qry_text, ViewScope.RecursiveAll)", "def getAssemblyByName(self, name):\n return self.assembliesByName[name]", "def get_query_set(self):\n return ArchiverQuerySet(self.model, using=self._db)", "def get_apps(provider, query):\n\n workdir = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(workdir, '..', 'config.yml')) as f:\n config = yaml.load(f)\n ex = Explorer()\n logging.info('Read bucket: %s', config['SCOOP_BUCKET'])\n apps = ex.get_apps(os.path.expandvars(config['SCOOP_BUCKET']), query)\n logging.info(\"Apps count = %d\", 
len(apps))\n installed = provider.get_installed()\n\n # check if already installed\n for app in apps:\n app['installed'] = app['name'] in installed\n\n return apps", "def search_for_books(self, query):\n books = []\n book = Book(self.db)\n for row in self.db.cursor().execute('SELECT genre_id FROM genres WHERE ' + query):\n books.extend(self.get_books(row[0]))\n\n return books", "def locateAllAssemblies(self):\n for a in self.getAssemblies(includeAll=True):\n a.lastLocationLabel = a.getLocation()", "def find(query):\n acc = []\n for root, dirs, files in os.walk(query, topdown=False):\n for name in files:\n acc += [os.path.join(root, name)]\n return acc", "def create_all_files_query():\n qry_text = \"<Where><Eq><FieldRef Name=\\\"FSObjType\\\" /><Value Type=\\\"Integer\\\">0</Value></Eq></Where>\"\n return CamlQuery.parse(qry_text, ViewScope.RecursiveAll)", "def all(klass):\n return klass.find()", "def query_all(self, search_query = \"all\", request = -1, batch = 1000):\n\n # send to Arxiv to learn total number of papers complying the filter\n trial_feed = self.send_query(search_query, 0, 1)\n feedtotal = int(trial_feed.feed.opensearch_totalresults)\n print('Total results for this query: %i' % feedtotal)\n\n # create lists which will be converted to parquet files\n al = []\n cl = []\n\n # if request is not provided, set req to feedtotal\n req = feedtotal if request == -1 else min(request,feedtotal)\n t = batch\n totalresults = 0\n\n # send queries until req is fulfilled\n while totalresults < req: #(t == batch and totalresults < req) or (request == -1 and totalresults < feedtotal):\n feed = self.send_query(search_query, totalresults, batch)\n al = self.read_authors(feed, al)\n cl = self.read_collabs(feed, al, cl)\n t = len(feed.entries)\n # al, cl, t, _ = query(search_query, al, cl, start = totalresults, max_results = batch, verbose = False)\n # al = al + a\n # cl = cl + c\n totalresults = totalresults + t\n print(\"Query returned: \", t)\n print(\"Total results:%i\" % totalresults)\n # if t == 0: t = batch #potential bug\n\n # create a dataframe containing author ids and names\n schema_a = StructType([\n StructField(\"id\", IntegerType(), True),\n StructField(\"name\", StringType(), True)\n ])\n self.author_df = self.create_df(al, schema_a)\n\n # create a dataframe containing collaborations\n schema_c = StructType([\n StructField(\"src\", IntegerType(), True),\n StructField(\"dest\", IntegerType(), True),\n StructField(\"arxiv\", StringType(), True),\n StructField(\"title\", StringType(), True)\n ])\n self.collab_df = self.create_df(cl, schema_c)\n\n # print results to parquet\n print(\"Query completed, length of unique authors: \", self.author_df.count() )\n print(\"Length of collabs: \", self.collab_df.count() )\n self.author_df.write.mode('overwrite') \\\n .parquet(\"Data/authors-%s-total%i.parquet\" % (search_query.replace(\":\",\"\"), totalresults))\n self.collab_df.write.mode('overwrite') \\\n .parquet(\"Data/collab-%s-total%i.parquet\" % (search_query.replace(\":\",\"\"), totalresults))\n print(\"Parquet written.\")", "def getAMsFromQuery(query):\n string = \"\"\"curl -X POST -H \"Content-Type: application/json\" -d '{{\"query\": \"{query}\"}}' tni-test.cern.ch/search/search\"\"\".format(query=query)\n result = json.loads(os.popen(string).read())\n return result", "def find_archived_project_files(self,\n\t\tsearch_old_apps=True,\n\t\tsearch_consolidate=False,\n\t\tsearch_backup=False):\n\t\tresults = []\n\t\tif search_old_apps:\n\t\t\tresults = 
self._search_old_applications(self.project)\n\n\t\tyear = Tina.parse_year(self.project)\n\t\t# archive pool - strategy 'A'\n#\t\tif self.base_dir == 'flame_archive':\n\t\tif self.base_dir:\n\t\t\t#print \"Searching flame_archive in archive pool\"\n\t\t\tobj = self._get_project_tina_entries(pool='archive')\n\t\t\tif obj:\n\t\t\t\tresults.append(obj)\n\n\t\t\t# if our base_path is 'flame_archive' we\n\t\t\t# need to search 'flame_consolidate' as well\n\t\t\t# NOTE: the reverse is NOT true. \n\t\t\t#print \"Searching flame_consolidate in archive pool\"\n\t\t\talt_path = \"/flame_consolidate/%s\" % ('/'.join(self.catalog_path.split('/')[2:]))\n\t\t\tobj = self._get_project_tina_entries(pool='archive',path_folder=alt_path,refresh=True)\n\t\t\tif obj:\n\t\t\t\tresults.append(obj)\n\n#\t\tif self.base_dir == 'flame_backup':\n#\t\t\t#print \"Searching flame_backup in backup pool\"\n#\t\t\t# normally we shouldn't be restoring from flame_backup\n#\t\t\t# but it can happen\n#\t\t\t# backup pool - strategy 'B'\n#\t\t\tobj = self._get_project_tina_entries(pool='archive')\n#\t\t\tif obj:\n#\t\t\t\tresults.append(obj)\n\n\t\treduced = TinaFind.reduce_results(results)\n\t\treturn reduced", "def check_assembly(config, assemblies):\n config[\"assembler\"] = \"Megahit\" if config[\"megahit\"] else \"Metaspades\"\n if len(assemblies) > 0:\n if config[\"assembly\"][\"metaspades\"]:\n # Remove single-end only assemblies\n # that Metaspades won't be able to run\n assemblies = filter_metaspades_assemblies(assemblies)\n return assemblies", "def search(self, query, mediatype=None):\n items = utils.listItems(self, '/search?query=%s' % quote(query))\n if mediatype:\n return [item for item in items if item.type == mediatype]\n return items", "def get_albums():\n return query_multiple(request.args, album_search, \\\n album_filter, Album, albums_schema)" ]
[ "0.59221876", "0.57674015", "0.55610645", "0.55106276", "0.5507275", "0.54957044", "0.5429782", "0.5363507", "0.5353214", "0.5123019", "0.50999445", "0.5084327", "0.5032342", "0.50054187", "0.50022805", "0.49877578", "0.49617597", "0.49112418", "0.48922423", "0.48880613", "0.48462978", "0.4835609", "0.4834734", "0.47916678", "0.47905543", "0.47897053", "0.47796044", "0.47635126", "0.4742735", "0.47028786" ]
0.64414144
0
Returns True iff a chatter can be activated on the model's form views, i.e. if it is a custom model (since we can make it inherit from mail.thread), or it already inherits from mail.thread.
def is_chatter_allowed(self, model): Model = request.env[model] return Model._custom or isinstance(Model, type(request.env['mail.thread']))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_view_permissions(self, obj):\n queryset = self.model.objects.filter(pk=obj.pk)\n if hasattr(queryset, 'has_view_permissions'):\n return queryset.has_view_permissions( PyFormsMiddleware.user() )\n else:\n return True", "def uses_usercase(self):\n return any(form.uses_usercase() for form in self.get_forms())", "def is_view(self):\n return self._base is not None", "def can_view(self, user):\n if self.applicant == user:\n return True\n elif user.has_perm('funding.view_all_applications'):\n # Fundihg commitee\n return True\n elif user.has_perm('funding.make_application_decisions'):\n # Fundihg manager - should have the view permissions, but just in case\n return True\n return False", "def has_permission(self, request, view):\n return True", "def can_be_viewed_by(self,user):\n\n # check whether everyone is allowed to view this. Anymous user is the only member of group\n # 'everyone' for which permissions can be set\n anonymousUser = get_anonymous_user()\n\n if anonymousUser.has_perm(\"view_ComicSiteModel\",self):\n return True\n else:\n # if not everyone has access, check whether given user has permissions\n return user.has_perm(\"view_ComicSiteModel\",self)", "def has_view_permission(self, request, obj=None):\n user = request.user\n if obj and type(obj) is Client:\n return obj.is_user_in_sales_contacts_of_client(user) or obj.is_user_in_support_contacts_of_client(user)\n return True", "def has_view_permission(self, request, obj=None):\n return True\n opts = self.opts\n codename = get_permission_codename('view', opts)\n return any([\n request.user.has_perm(\"%s.%s\" % (opts.app_label, codename)),\n request.user.has_perm(\"%s.%s\" % (opts.app_label, codename), obj)])", "def has_permission(self, request, view):\n if request.method in permissions.SAFE_METHODS:\n return True\n return False", "def has_object_permission(self, request, view, obj):\n\n try:\n Contact.objects.get(user=request.user)\n\n except Contact.DoesNotExist:\n return False\n\n return True", "def has_permission(self, request, view):\n return False", "def __is_model_permission(self, name: str) -> bool:\n permission_name, model_name = name.split('_')\n\n return permission_name in PERMISSIONS_PREFIXES and model_name in self.models_names", "def has_moderate_permission(self, request):\n if not settings.CMS_MODERATOR:\n return False\n return self.has_generic_permission(request, \"moderate\")", "def has_permission(self, request, view):\n user = request.user\n try:\n user.user_client\n return True\n except Exception:\n return False", "def can_view(self, user):\r\n return True", "def has_permission(self, request):\n\t\treturn request.user.is_active", "def has_permission(self, request):\n return request.user.is_active", "def has_permission(self, request):\n return request.user.is_active", "def has_permission(self, request):\n return request.user.is_active", "def is_model(self):\n return self.model_name() is not None", "def can_be_viewed_by(self,user):\n return True", "def _is_ticketing_handled(self, regform, **kwargs):\n return regform.cern_access_request is not None and regform.cern_access_request.is_active", "def has_object_permission(self, request, view, user):\n return user == request.user or request.user.is_superuser", "def has_permission(self, request, view):\n if request.method == \"POST\":\n return self.model_admin_config.has_add_permission(self, request)\n return True", "def is_visible(cls, request):\n if cls.permission_required:\n return request.user.has_perm(cls.permission_uri)\n else:\n return True", "def 
has_registered_controller(self, model):\n return model in self._registry", "def can_be_moderated_by(user):\n return user.is_active and user.is_staff and (\n user.has_perm('blog.change_membership') or\n user.has_perm('blog.change_blog'))", "def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n if all([request.user, request.user.is_staff]):\n return True\n elif all([request.user, type(obj) == type(request.user), obj == request.user]):\n return True\n\n return True", "def is_view_and_comment(self):\n return self._tag == 'view_and_comment'", "def is_actor(self):\n return True" ]
[ "0.6115297", "0.5955316", "0.5916017", "0.58894145", "0.57932717", "0.57909715", "0.5783127", "0.5749649", "0.5746116", "0.57435966", "0.57211596", "0.56949604", "0.56927466", "0.56890917", "0.56578463", "0.56559616", "0.559266", "0.559266", "0.559266", "0.55788153", "0.55685705", "0.556563", "0.5550565", "0.5544599", "0.5542412", "0.5515455", "0.5485803", "0.5481747", "0.5458053", "0.544541" ]
0.7348025
0
Open a view for translating the field(s) of the record (model, id).
def _get_studio_action_translations(self, model, **kwargs): domain = ['|', ('name', '=', model.model), ('name', 'ilike', model.model + ',')] # search view + its inheritancies views = request.env['ir.ui.view'].search([('model', '=', model.model)]) domain = ['|', '&', ('name', '=', 'ir.ui.view,arch_db'), ('res_id', 'in', views.ids)] + domain def make_domain(fld, rec): name = "%s,%s" % (fld.model_name, fld.name) return ['&', ('res_id', '=', rec.id), ('name', '=', name)] def insert_missing(fld, rec): if not fld.translate: return [] if fld.related: try: # traverse related fields up to their data source while fld.related: rec, fld = fld.traverse_related(rec) if rec: return ['|'] + domain + make_domain(fld, rec) except AccessError: return [] assert fld.translate and rec._name == fld.model_name request.env['ir.translation'].insert_missing(fld, rec) return [] # insert missing translations of views for view in views: for name, fld in view._fields.items(): domain += insert_missing(fld, view) # insert missing translations of model, and extend domain for related fields record = request.env[model.model].search([], limit=1) if record: for name, fld in record._fields.items(): domain += insert_missing(fld, record) action = { 'name': _('Translate view'), 'type': 'ir.actions.act_window', 'res_model': 'ir.translation', 'view_mode': 'tree', 'views': [[request.env.ref('base.view_translation_dialog_tree').id, 'list']], 'target': 'current', 'domain': domain, } return action
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return", "def record_detail(request, slug, pk):\n # Try except to make sure the user is a member of this project\n try:\n ProjectMember.objects.get(user=request.user, project=Project.objects.get(slug=slug))\n except ObjectDoesNotExist:\n # User is not a member\n return HttpResponse(\"You're trying to access a project you're not a member of or a project that does not exist.\")\n else:\n # User is a member, details are provided and template is rendered.\n record = get_object_or_404(models.Record, pk=pk)\n project = models.Project.objects.get(slug=slug)\n template = 'records/record_detail.html'\n data = forms.ShowRecordForm(data=model_to_dict(record), entry=record.entry_type)\n context = {\n 'record':record,\n 'project':project,\n 'userperm':project.memberships.get(user=request.user),\n 'data':data\n }\n return render(request,template,context)", "def view(self):", "def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )", "def open_viewer(self):\r\n choice = self.thoughts_lst.get(tk.ACTIVE)\r\n subject = self.refference[choice]\r\n tbl = self.home_table[subject]\r\n view = kit.SQL_pull('*', tbl, 'subject_id = \"{}\"'.format(subject))\r\n obj = kit.class_fill(tbl, view[0])\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jv.Viewer(self.session, obj)", "def details(request, pofile_id):\n po_messages = PoMessages.objects.filter(po_file__pk = pofile_id)\n return render_to_response('poeditor/details.html', {\n 'po_messages' : po_messages,\n }, context_instance=RequestContext(request))", "def generateModelEdit(data):\n\n global h_model\n h_model.append(\"\")\n h_model.append(\"\\t// In-Table edit support:\")\n h_model.append(\"\\tvoid store(const QString &sign, const QString &code);\")\n generateModelFlags(data)\n generateModelSetData(data)", "def data_en(request):\n files = myFile.objects.order_by('name')\n context = {'files' : files}\n return render(request, 'sacms/data_en.html', context)", "def office_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n return render_to_response('office_form.html', \n {'details': office_reference,'info':office_reference},\n context_instance=RequestContext(request))", "def DEADcreate_v_fix_view():\n sql_view = \"\"\"create or replace view v_fix as\n SELECT \n fix.fix_ident, \n fix.fix_center,\n ST_Y(ST_Transform(fix.fix_center, 4326)) as fix_lat84,\n ST_X(ST_Transform(fix.fix_center, 4326)) as fix_lon84\n \n FROM \n fix\"\"\"\n conf.Cur.execute(sql_view)\n conf.Con.commit()", "def edit(request, id, model, decorator = lambda x:x,\r\n post_save_redirect='', template_name=''):\r\n record = get_or_404(request, model, id)\r\n \r\n FormClass = decorator(\r\n forms.form_for_instance(\r\n record,\r\n fields = get_allowed_fields(request, 
model),\r\n ), \r\n request,\r\n instance = record\r\n )\r\n \r\n template_name = template_name or _make_template_name(model, 'form')\r\n\r\n #import pdb; pdb.set_trace()\r\n if request.method == 'POST':\r\n form = FormClass(request.POST)\r\n if form.is_valid():\r\n record = form.save()\r\n return HttpResponseRedirect(\r\n post_save_redirect or record.get_absolute_url()\r\n )\r\n else:\r\n form = FormClass()\r\n return render_to_response(\r\n template_name,\r\n context_instance = RequestContext(\r\n request,\r\n {\r\n 'form': form,\r\n }\r\n )\r\n )", "def open_create_partner(self, cr, uid, ids, context=None):\n view_obj = self.pool.get('ir.ui.view')\n view_id = view_obj.search(cr, uid, [('model', '=', self._name), \\\n ('name', '=', self._name+'.view')])\n return {\n 'view_mode': 'form',\n 'view_type': 'form',\n 'view_id': view_id or False,\n 'res_model': self._name,\n 'context': context,\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n }", "def edit_record(self, record):\r\n self.record.editObject(record, id=record['id'])", "def _translate_backupjobrun_detail_view(context, backupjobrun):\n\n d = _translate_backupjobrun_summary_view(context, backupjobrun)\n\n return d", "def show_messages():\n\n messages = Message.query.all()\n # translation_list = [\"\"]\n\n for message in messages:\n # message.translation gives list of objects. All the translation for the \n # language. Here assgin it to one trans_text based on user's language\n # selection. \n message.translation = Translation.query.filter_by(language=g.user.language, \n message_id=message.message_id).first()\n\n return render_template(\"messages.html\", messages=messages, user=g.user)", "def cook(self, obj, request, field_name):\n view_url = ''\n edit_url = ''\n \n if hasattr(obj, 'get_absolute_url'):\n view_url = obj.get_absolute_url();\n if request.user.has_perm('%s.change_%s' %(obj._meta.app_label, obj._meta.model_name)):\n\t\t\tedit_url = reverse('admin:%s_%s_change' %(obj._meta.app_label, obj._meta.model_name), args=[obj.id])\n\t\t\n result = {'text': unicode(obj),\n 'view_url': view_url,\n 'edit_url': edit_url\n }\n return result", "def view_edit(\n request: HttpRequest,\n pk: Optional[int] = None,\n workflow: Optional[Workflow] = None,\n view: Optional[View] = None,\n) -> JsonResponse:\n # Form to read/process data\n form = ViewAddForm(request.POST or None, instance=view, workflow=workflow)\n\n return save_view_form(\n request,\n form,\n 'table/includes/partial_view_edit.html')", "def open_invoice(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n mod_obj = self.pool.get('ir.model.data')\n for advance_pay in self.browse(cr, uid, ids, context=context):\n form_res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')\n form_id = form_res and form_res[1] or False\n tree_res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_tree')\n tree_id = tree_res and tree_res[1] or False\n\n return {\n 'name': _('Advance Invoice'),\n 'view_type': 'form',\n 'view_mode': 'form,tree',\n 'res_model': 'account.invoice',\n 'res_id': int(context['invoice_id'][0]),\n 'view_id': False,\n 'views': [(form_id, 'form'), (tree_id, 'tree')],\n 'context': context,\n 'type': 'ir.actions.act_window',\n }", "def viewJournal(self):\n\n x = self.tableWidget_journals.currentRow()\n Dialog_memo = QtGui.QDialog()\n ui = Ui_Dialog_memo(self.journals[x]['journal'])\n ui.setupUi(Dialog_memo, self.journals[x]['name'] +\" \"+ self.journals[x]['owner'] +\", \"+self.journals[x]['date'])\n Dialog_memo.exec_()\n # 
update model and database\n newText = ui.getMemo()\n newText = newText.decode('unicode-escape')\n try:\n pass\n newText = newText.decode(\"utf-8\", \"replace\")\n except UnicodeDecodeError:\n print(\"unicode error\")\n if newText != self.journals[x]['journal']:\n self.journals[x]['journal'] = newText\n cur = self.settings['conn'].cursor()\n cur.execute(\"update journal set journal=? where name=?\", (newText, self.journals[x]['name']))\n self.settings['conn'].commit()", "def edit(self, id):\n lm = h.eagerload_morpheme_language_model(Session.query(MorphemeLanguageModel)).get(id)\n if lm:\n return {'data': get_data_for_new_edit(dict(request.GET)),\n 'morpheme_language_model': lm}\n else:\n response.status_int = 404\n return {'error': 'There is no morpheme language model with id %s' % id}", "def __init__(self, *models, **kwargs):\n self.models = models\n self.record_views = {}\n if 'record_view' in kwargs:\n record_view = kwargs['record_view']\n else:\n record_view = RecordView\n for model in self.models:\n if isinstance(model, list) or isinstance(model, tuple):\n model_key = ','.join([model[0]._meta.app_label, model[0]._meta.object_name])\n self.record_views[model_key] = model[1](model[0])\n else:\n model_key = ','.join([model._meta.app_label, model._meta.object_name])\n self.record_views[model_key] = record_view(model)\n\n super(RecordsView, self).__init__(**kwargs)", "def open_table(self, table_name, sort_index):\n self.db = self.__connect_db('local') # Connect to database\n if self.db.open():\n self.model = self.__create_model(self.db, table_name, sort_index) # Create database model\n self.field_index = self.__get_fields(self.model) # Retrieve the fields dictionary\n\n # Set up table header data\n self.model.setHeaderData(self.field_index['NO'], Qt.Horizontal, 'ID Number')\n self.model.setHeaderData(self.field_index['NAME'], Qt.Horizontal, 'Name')\n self.model.setHeaderData(self.field_index['GENDER'], Qt.Horizontal, 'Gender')\n self.model.setHeaderData(self.field_index['BIRTH'], Qt.Horizontal, 'Birth')\n self.model.setHeaderData(self.field_index['PROVINCE'], Qt.Horizontal, 'Province')\n self.model.setHeaderData(self.field_index['DEPT'], Qt.Horizontal, 'Department')\n self.model.setHeaderData(self.field_index['SALARY'], Qt.Horizontal, 'Salary')\n self.model.setHeaderData(self.field_index['PHOTO'], Qt.Horizontal, 'Photo')\n self.model.setHeaderData(self.field_index['MEMO'], Qt.Horizontal, 'Memo')\n\n # Set up mapping between table data and display widgets\n self.mapper = QDataWidgetMapper()\n self.mapper.setModel(self.model)\n self.mapper.setSubmitPolicy(QDataWidgetMapper.AutoSubmit)\n self.mapper.addMapping(self.ui.spin_info_id, self.field_index['NO'])\n self.mapper.addMapping(self.ui.lineedit_name, self.field_index['NAME'])\n self.mapper.addMapping(self.ui.combo_info_sex, self.field_index['GENDER'])\n self.mapper.addMapping(self.ui.dateedit_brith_year, self.field_index['BIRTH'])\n self.mapper.addMapping(self.ui.combo_info_birth_addr, self.field_index['PROVINCE'])\n self.mapper.addMapping(self.ui.combo_info_dept, self.field_index['DEPT'])\n self.mapper.addMapping(self.ui.combo_info_salary, self.field_index['SALARY'])\n self.mapper.addMapping(self.ui.textedit_memo, self.field_index['MEMO'])\n self.mapper.toFirst()\n\n # Set up Selection model for each row of table\n self.sel_model = QItemSelectionModel(self.model)\n self.sel_model.currentChanged.connect(self.act_cur_changed)\n self.sel_model.currentRowChanged.connect(self.act_cur_row_changed)\n\n # Connect table view and table model\n 
self.ui.tbl_view_show_data.setModel(self.model)\n self.ui.tbl_view_show_data.setSelectionModel(self.sel_model)\n self.ui.tbl_view_show_data.setColumnHidden(self.field_index['PHOTO'], True)\n self.ui.tbl_view_show_data.setColumnHidden(self.field_index['MEMO'], True)\n\n # Customized delegates for table data\n sex_list = ['Male', 'Female']\n self.__delegate_sex = QmyComboBoxDelegate()\n self.__delegate_sex.setItems(sex_list, False) # Link sex list and delegate, not editable\n self.ui.tbl_view_show_data.setItemDelegateForColumn(self.field_index['GENDER'], self.__delegate_sex)\n dept_list = ['CS', 'AI', 'Network', 'Unix', 'Business']\n self.__delegate_dept = QmyComboBoxDelegate()\n self.__delegate_dept.setItems(dept_list, True) # Link dept list and delegate, editable\n self.ui.tbl_view_show_data.setItemDelegateForColumn(self.field_index['DEPT'], self.__delegate_dept)\n\n # Enable and Disable actions\n self.ui.act_add.setEnabled(True)\n self.ui.act_insert.setEnabled(True)\n self.ui.act_del.setEnabled(True)\n self.ui.group_sort.setEnabled(True)\n else:\n QMessageBox.warning(self, 'Error', 'Open database failed')", "def showEditContact(self):", "def showProjectionDialog(self):\n dlg = uic.loadUi('multilineinputdialog.ui')\n dlg.setWindowTitle('Get and set OpenGL ModelView matrix and focus')\n precision = 8 # use default precision\n MV_repr = np.array_repr(self.MV, precision=precision)\n focus_repr = np.array_repr(self.focus, precision=precision)\n txt = (\"self.MV = \\\\\\n\"\n \"%s\\n\\n\"\n \"self.focus = %s\" % (MV_repr, focus_repr))\n dlg.plainTextEdit.insertPlainText(txt)\n dlg.plainTextEdit.selectAll()\n if dlg.exec_(): # returns 1 if OK, 0 if Cancel\n txt = str(dlg.plainTextEdit.toPlainText())\n from numpy import array, float32 # required for exec()\n exec(txt) # update self.MV and self.focus, with hopefully no maliciousness", "def showProjectionDialog(self):\n dlg = uic.loadUi('multilineinputdialog.ui')\n dlg.setWindowTitle('Get and set OpenGL ModelView matrix and focus')\n precision = 8 # use default precision\n MV_repr = np.array_repr(self.MV, precision=precision)\n focus_repr = np.array_repr(self.focus, precision=precision)\n txt = (\"self.MV = \\\\\\n\"\n \"%s\\n\\n\"\n \"self.focus = %s\" % (MV_repr, focus_repr))\n dlg.plainTextEdit.insertPlainText(txt)\n dlg.plainTextEdit.selectAll()\n if dlg.exec_(): # returns 1 if OK, 0 if Cancel\n txt = str(dlg.plainTextEdit.toPlainText())\n from numpy import array, float32 # required for exec()\n exec(txt) # update self.MV and self.focus, with hopefully no maliciousness", "def on_pushButton_view_clicked(self):\n content = unicode(self.comboBox.currentText())\n if content == \"职称表\":\n data = self.sql_client.get_zc_info()\n self.fill_tableview(data)\n elif content == \"文化表\":\n data = self.sql_client.get_wh_info()\n self.fill_tableview(data)\n elif content == \"部门表\":\n data = self.sql_client.get_bm_info()\n self.fill_tableview(data)", "def load_data(model, i18n_model, i18n_dirname):\n position_num = []\n dic_keys = {}\n csv_separator = ':'\n re_num = re.compile('\\d+$')\n\n # Get only the fields name\n fields = [ x for x in model[2:] if not (':' in x or '#' in x) ]\n # Get left data.\n fields_number = len(fields)\n csv_file = model[0]\n model_name = model[1]\n print \"Adding data in %s.%s table\" % (i18n_dirname, model_name)\n # Load the class of the models file\n exec \"%s = getattr(i18n_model, model_name)\" % model_name\n\n # Get the position of numeric fields\n if ':N' in model:\n pos = model.index(':N')\n position_num = model[pos-1] # The 
field numeric is before of ':N'\n position_num = [ int(x) for x in position_num if not '#' in x ]\n\n # Info. about keys\n if ':K' in model:\n pos = model.index(':K')\n info_keys = model[pos-1]\n # Format-> :[position],[model name]:...\n info_keys = info_keys.split(':')[1:]\n keys = [ (int(x.split(',')[0]), x.split(',')[1]) for x in info_keys ]\n dic_keys = dict(keys)\n\n # To store the keys. Set to values null\n model_id = {}\n for x in dic_keys.keys():\n model_id.setdefault(x, None)\n\n # Convert from CSV to Django ORM\n reader = csv.reader(comment_stripper(\n open(csv_file)), delimiter=csv_separator)\n\n line_bool = [] # Lines where is enabled a boolean field.\n bool_found = False\n line_number = 0\n for csv_line in reader:\n #debug\n# if \\\n# model_name == \"Phone\" or \\\n# model_name == \"AddressFormat\":\n# model_name == \"Country\" or \\\n# model_name == \"CountryLanguage\" or \\\n# model_name == \"Language\" or \\\n# model_name == \"Subdivision\" or \\\n# model_name == \"TimeZone\" or\n# print \"\\tskip\"\n# break\n\n object_line = []\n key_line_s = []\n line_number += 1\n\n object_line.append(\"c%d = %s(\" % (line_number, model_name))\n\n for position in range(0, fields_number):\n field_text = csv_line[position]\n if field_text == 'True':\n if not bool_found:\n bool_field = fields[position]\n bool_found = True\n line_bool.append(line_number)\n elif field_text: # If is not empty\n key_line = []\n if object_line[-1][-1] != '(': # Check the last character\n object_line.append(', ')\n # If is a key\n if dic_keys and dic_keys.has_key(position):\n object_line.append('%s=key_id%d'\n % (fields[position], position))\n key_model = dic_keys.get(position)\n\n # Load the class of the foreigner model.\n try:\n eval(\"%s\" % key_model)\n except NameError:\n exec \"%s = getattr(i18n_model, key_model)\" %key_model\n\n if csv_line[position] != model_id.get(position):\n model_id[position] = csv_line[position]\n\n key_line.append('key_id%d = %s.objects.get(pk='\n % (position, key_model))\n if re_num.match(model_id.get(position)): # integer\n key_line.append('%d)' % model_id.get(position))\n else:\n key_line.append('\"%s\")' % model_id.get(position))\n\n key_line = ''.join(key_line)\n key_line_s.append(key_line)\n\n # If is an integer\n elif position in position_num:\n object_line.append('%s=%s' \\\n % (fields[position], csv_line[position]))\n # If is a string.\n else:\n object_line.append('%s=\"%s\"' \\\n % (fields[position], csv_line[position]))\n\n if key_line_s:\n for key in key_line_s:\n# print key #debug\n exec(key)\n\n object_line.append(\")\")\n load_object = ''.join(object_line)\n# print load_object #debug\n exec(load_object) # Load the object\n\n # At the end, save all objects together\n if model_name == 'Language':\n # Display the english language.\n for num in range(1, line_number+1):\n obj = eval(\"c%d\" % num)\n if obj.iso3_code == 'eng':\n obj.display = True\n obj.save()\n else:\n for num in range(1, line_number+1):\n obj = eval(\"c%d\" % num)\n if num in line_bool:\n exec(\"obj.%s = True\" % bool_field)\n try:\n \tobj.save()\n except:\n print \"Problem loading data. 
Entry will not be loaded.\"\n try:\n transaction.rollback()\n except:\n #Some databases were having trouble with the rollback\n pass", "def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n #pks = self.provider.get_primary_fields(self.model)\n \n log.debug(\"soyRomperLB= %s\" %kw)\n\n ###########################################\n pks = self.provider.get_primary_fields(self.model)\n \n ###########################################\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n return dict(value=value, model=self.model.__name__, pk_count=len(pks))", "def show_activity(self, number):\n database = Database('data/database.db')\n activ = database.read_activity(number)\n view = Template(filename=\"view/template.html\", lookup=lookup)\n\n try:\n render = view.render(\n rows = [[activ.number, activ.name]],\n pageTitle = \"Activité \" + number,\n tableTitle = \"Activité \" + number,\n ths = [\"Numéro\", \"Nom\"]\n )\n except AttributeError:\n render = view.render(\n rows = [],\n pageTitle = \"Activité \" + number,\n tableTitle = \"Activité \" + number,\n ths = [\"Numéro\", \"Nom\"]\n )\n \n return render", "def detail(model_id: str = typer.Argument(..., help='Model ID')):\n with requests.get(f'{app_settings.api_v1_prefix}/model/{model_id}') as r:\n data = r.json()\n model_detailed_view(MLModel.parse_obj(data))" ]
[ "0.5074276", "0.50698423", "0.5038963", "0.5035691", "0.50303924", "0.50299984", "0.5016612", "0.49554166", "0.4932844", "0.49023816", "0.48956478", "0.48940113", "0.48839295", "0.48726508", "0.48695204", "0.4864835", "0.4864338", "0.48637855", "0.4852656", "0.4843753", "0.4832436", "0.4783692", "0.4780307", "0.47771993", "0.47771993", "0.47547522", "0.4742139", "0.47114816", "0.47084478", "0.4708159" ]
0.5943181
0
Create a new menu, linked to a new action associated to the model_id
def create_new_menu(self, name, model_id, is_app=False, parent_id=None, icon=None): # create the action model = request.env['ir.model'].browse(model_id) new_action = request.env['ir.actions.act_window'].create({ 'name': name, 'res_model': model.model, 'help': """ <p> This is your new action ; by default, it contains a list view and a form view. </p> <p> You can start customizing these screens by clicking on the Studio icon on the top right corner (you can also customize this help message there). </p> """, }) action_ref = 'ir.actions.act_window,' + str(new_action.id) if is_app: # create the menus (app menu + first submenu) new_context = dict(request.context) new_context.update({'ir.ui.menu.full_list': True}) # allows to create a menu without action new_menu = request.env['ir.ui.menu'].with_context(new_context).create({ 'name': name, 'web_icon': ','.join(icon), 'child_id': [(0, 0, { 'name': name, 'action': action_ref, })] }) else: # create the submenu new_menu = request.env['ir.ui.menu'].create({ 'name': name, 'action': action_ref, 'parent_id': parent_id, }) return { 'menu_id': new_menu.id, 'action_id': new_action.id, }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_menu():", "def create_menus( self ):", "def restaurantMenuItemNew(restaurant_id):\n try:\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n if request.method == 'POST':\n if request.form['name']:\n newItem = MenuItem(name=request.form['name'], description=request.form[\n 'description'], price=request.form['price'], course=request.form['course'], restaurant_id=restaurant_id)\n session.add(newItem)\n session.commit()\n\n flash('Menu Item Created', 'menu')\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))\n else:\n return render_template('menuItemNew.html', restaurant=restaurant)\n\n except exc.NoResultFound:\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))", "def makeActionMenu(self):\n\t\tself.newAct = QtGui.QAction(self.tr(\"&Novo\"),self)\n\t\tself.newAct.setShortcut(self.tr(\"Ctrl+N\"))\n\t\tself.newAct.setStatusTip(self.tr(\"Cria uma nova area de desenho em branco\"))\n\t\tself.connect(self.newAct,SIGNAL(\"triggered()\"),self.glwidget.newFile)\n\t\t\n\t\tself.openAct = QtGui.QAction(self.tr(\"&Abrir\"),self)\n\t\tself.openAct.setShortcut(self.tr(\"Ctrl+o\"))\n\t\tself.openAct.setStatusTip(self.tr(\"Abrir arquivo do elvis\"))\n\t\tself.connect(self.openAct,SIGNAL(\"triggered()\"),self.glwidget.openElvisfile)\t\t\n\n\t\tself.saveAct = QtGui.QAction(self.tr(\"&Salvar\"),self)\n\t\tself.saveAct.setShortcut(self.tr(\"Ctrl+S\"))\n\t\tself.saveAct.setStatusTip(self.tr(\"Salva a imagem do canvas\"))\n\t\tself.connect(self.saveAct,SIGNAL(\"triggered()\"),self.glwidget.saveElvisfile)\n\t\t\n\t\tself.exportAct = QtGui.QAction(self.tr(\"&Exportar SVG\"),self)\n\t\tself.exportAct.setShortcut(self.tr(\"Ctrl+E\"))\n\t\tself.exportAct.setStatusTip(self.tr(\"Exporta para formato SVG\"))\n\t\tself.connect(self.exportAct,SIGNAL(\"triggered()\"),self.glwidget.ExportSVG)\n\t\t\t\t\n\t\t\n\t\tself.exitAct = QtGui.QAction(self.tr(\"&Sair\"),self)\n\t\tself.exitAct.setStatusTip(self.tr(\"Sair do programa\"))\n\t\tself.connect(self.exitAct,SIGNAL(\"triggered()\"),self.close)\n\t\t\n\t\n\t\tself.aboutAct = QtGui.QAction(self.tr(\"&Sobre\"),self)\n\t\tself.aboutAct.setStatusTip(self.tr(\"Sobre o programa\"))\n\t\tself.connect(self.aboutAct,SIGNAL(\"triggered()\"),self.about)", "def goto_create(self):\n\n self.create.click()", "def newMenuItem(restaurant_id):\n\n if 'access_token' not in flask_session:\n return logInRedirect()\n restaurant = session.query(Restaurant).filter_by(id = restaurant_id).first()\n user_id = getUserId(flask_session['email'],flask_session['google_plus_id'])\n if not restaurant.user_id == user_id:\n flash(\"Only restaurant owners can add new items.\")\n return redirect(url_for(\"publicMenu\",restaurant_id = restaurant_id))\n\n if request.method == \"POST\":\n new_name = request.form['new_name']\n print \"\\nnewMenuItem POST triggered, name is: \", new_name\n newMenuItem = MenuItem( name=new_name,\n restaurant_id=restaurant.id )\n session.add(newMenuItem)\n session.commit()\n flash( \"new item '\" + new_name + \"' created!\")\n print \"POST worked!\"\n return redirect(url_for(\"showMenu\", restaurant_id=restaurant.id))\n\n else:\n return render_template('newMenuItem.html', restaurant = restaurant)", "def menu_item_new(restaurant_id):\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n img_id = 0\n if request.method == 'POST':\n if 'file' in 
request.files:\n print(\"File found\")\n img_id = helper.create_new_image_if_not_exists(file=request.files['file'],\n title=request.form['img_name'])\n new_item = MenuItem(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'],\n course=request.form['course'],\n likes=0,\n dislikes=0,\n restaurant_id=restaurant_id,\n user_id=login_session['user_id'],\n image_id=img_id)\n session.add(new_item)\n session.commit()\n flash(\"New Menu Item {} created!\".format(new_item.name))\n return redirect(url_for('restaurant_menu', restaurant_id=restaurant_id))\n else:\n user_info = helper.get_user_if_exists(login_session)\n return render_template('newmenuitem.html', restaurant=restaurant, user_info=user_info)", "def new(self, *args, **kw):\n id_tipo_item = UrlParser.parse_id(request.url, \"tipositems\")\n url_action = \"./\"\n\n pp = PoseePermiso('redefinir tipo item', id_tipo_item=id_tipo_item)\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(atras)\n tmpl_context.widget = self.new_form\n return dict(value=kw, \n page=u\"Nuevo Atributo\", \n action=url_action, \n atras=url_action)", "def makeMenu(self):\n\t\tself.fileMenu = self.menuBar().addMenu(self.tr(\"&Arquivo\"))\n\t\tself.fileMenu.addAction(self.newAct)\n\t\tself.fileMenu.addAction(self.openAct)\n\t\tself.fileMenu.addAction(self.saveAct)\n\t\tself.fileMenu.addAction(self.exportAct)\n\t\tself.fileMenu.addSeparator() \n\t\tself.fileMenu.addAction(self.exitAct)\n\n\t\tself.editMenu = self.menuBar().addMenu(self.tr(\"&Editar\"))\n\t\t\n\t\tself.helpMenu = self.menuBar().addMenu(self.tr(\"&Ajuda\"))\n\t\tself.helpMenu.addAction(self.aboutAct)", "def _newFileMenuItem(self):\n\n dialogs = Dialogs(self.view)\n\n path = self._newFileWizard()\n\n #see if we want to make a blank scene or not\n msg = 'How should the new file be created?'\n BLANK = 'Make a blank maya scene'\n EXISTING = 'Use a copy of an existing file'\n\n choice = dialogs.radioButtonDialog(msg, [BLANK, EXISTING])\n\n if choice == BLANK:\n msg = 'Final confirmation:'\n msg += '\\n\\nCreate blank maya file at \"%s\"?' % path\n dialogs.confirmPrompt(msg)\n self.model.createFile(path)\n\n elif choice == EXISTING:\n src_path = dialogs.fileDialog(\n self.model.project.getScenesDir(),\n self.model.project.getDialogFilters())\n\n msg = 'Please confirm:'\n msg += '\\n\\nCopy \"%s\" to new file \"%s\"?' 
% (src_path, path)\n dialogs.confirmPrompt(msg)\n self.model.copyFile(src_path, path)\n\n msg = 'New file successfully created!'\n msg += '\\n\\nLocation: %s' % path\n msg += '\\n\\nPlease check out your new file to begin work on it.'\n dialogs.infoPrompt(msg)", "def admincreate(object):\n if request.method == \"POST\":\n\n db = get_db()\n execute_string = 'INSERT INTO ' + object.title()\n\n if object == 'post':\n execute_string += '(title, content, authorId, categoryId) VALUES (\"' + request.form['title'] + '\", \"' + request.form[\"content\"] + '\", \"' + request.form[\"authorid\"] + '\", \"' + request.form[\"categoryid\"] + '\")'\n elif object == 'author':\n execute_string += '(name) VALUES (\"' + request.form['name'] + '\")'\n elif object == 'category':\n execute_string += '(name, description) VALUES (\"' + request.form['name'] + '\", \"' + request.form[\"description\"] + '\")'\n\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n return render_template(\"new.html\", object=object, item={})", "def createSequence(self,**kwargs):\n members = self.bl.getAllSavedActions() \n entries={}\n\n num = len(self.actionSequence)\n self.baxter.mm.changeMenuTitle(\"%f actions saved: %s\" % (num, str(self.actionSequence)))\n\n for param in members:\n entries[str(param)] = self.chooseBlock\n\n entries[\"Run Sequence\"] = self.runSequence\n entries[\"Reset\"] = self.resetSequence\n self.mm.addGenericMenu(\"sequenceMenu\",self.mm.cur_page,\"Select the action to add to the sequence\", entries)\n self.mm.loadMenu(\"sequenceMenu\")", "def create_menu(self, parent):\n menu = QtGui.QMenu(parent=parent)\n return menu.menuAction()", "def new(self, *args, **kw):\n\n\t\t\tif len(args) > 0:\n\t\t\t\tkw['id_fase_fk']= args[0] \n\n\t\t\ttmpl_context.widget = self.new_form\n\t\t\tretorno \t\t= dict(value = kw, model = self.model.__name__)\n\t\t\tretorno['fid']\t= args[0]\n\n\t\t\treturn retorno", "def newMenuItemPage(restaurant_id):\n restaurant = db_methods.searchResByID(restaurant_id)\n res_id = restaurant_id\n user_id = login_session['user_id']\n if request.method == 'POST':\n item_name = request.form['item_name']\n item_price = request.form['item_price']\n item_desc = request.form['item_desc']\n item_course = request.form['item_course']\n if item_name and item_price and item_desc and item_course:\n db_methods.addNewMenuItem(user_id, item_name, item_price, \n item_desc, item_course, res_id)\n time.sleep(0.1)\n return redirect(\"/restaurants/%s/menu/\" % res_id)\n else:\n error = \"Please be sure to fill out all required fields.\"\n return render_template('newmenuitem.html', error = error)\n else:\n return render_template('newmenuitem.html', res_id = res_id)", "def projectMenuActions( self, action ):\n\tif ( action.text() == 'Create Project' ): \n\t self.CreateProjectWidget()", "def newModel(style_id):\n style = session.query(Style).filter_by(id=style_id).one()\n if request.method == 'POST':\n newModel = Model(\n name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'],\n power=request.form['power'],\n image=request.form['image'],\n style_id=style_id,\n user_id=login_session['user_id'])\n session.add(newModel)\n session.commit()\n flash(\n 'New Model %s Successfully Created' % (newModel.name),\n 'alert-success')\n return redirect(url_for('showModels', style_id=style_id))\n else:\n return render_template('newModel.html', style=style)", "def make_menu_action(action, menu, win):\n\n\t# create the item\n\tif action.icon != 
\"\":\n\t\titem = Gtk.ImageMenuItem(win.ui.get_icon(action.icon),\n\t\t\tlabel=action.label, always_show_image = True, use_stock = True)\n\telse:\n\t\titem = Gtk.MenuItem(action.label)\n\tif action.help != \"\":\n\t\titem.set_tooltip_text(action.help)\n\tmenu.append(item)\n\t\n\t# connect the item\n\tobs = MenuObserver(item, action, win)\n\tfor dep in action.get_deps():\n\t\tdep.add_observer(obs)\n\titem.connect(\"activate\", obs.activate)", "def createMenu():\n mType = -1\n if auth.is_logged_in() and auth.has_membership('administrador',auth.user.id):\n return menuAdmin\n elif auth.is_logged_in():\n return menuUser\n else:\n return menuPublic", "def new(self, *args, **kw):\n pp = PoseePermiso('crear rol')\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(self.action)\n tmpl_context.widget = self.new_form\n if request.environ.get('HTTP_REFERER') == \"http://\" + request.environ.get('HTTP_HOST',) + \"/\":\n atras = \"../\"\n else:\n atras = \"/roles\"\n return dict(value=kw, page=\"Nuevo Rol\", action=self.action, atras=atras)", "def newItem():\n if request.method == 'POST':\n db.createItem(\n title=request.form['title'],\n description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['user_id'])\n flash(\"New catalog item created!\", 'success')\n return redirect(url_for('showCatalog'))\n return render_template('new_item.html', categories=db.getAllCategories())", "def create_menu_par(self, name, trig_func, menu, shrt_cut):\n\n createdAction = QAction(name, self)\n createdAction.setShortcut(shrt_cut)\n createdAction.triggered.connect(trig_func)\n menu.addAction(createdAction)\n return createdAction", "def addMenu():\n mb.addAction(actionAccessories)\n actionAccessories.setVisible(True)", "def create_menu_item(menu, label, func):\n item = wx.MenuItem(menu, -1, label)\n menu.Bind(wx.EVT_MENU, func, id=item.GetId())\n menu.Append(item)\n return item", "def new(self, *args, **kw):\n \"\"\"filtramos solo los Tipos de Items asociados al Proyecto\"\"\"\n id_fase = args[0]\n #traemos la fase de la BD\n fase = DBSession.query(Fase).filter_by(id_fase = id_fase).one()\n #obtenemos los Tipos de Items asignados a la fase..\n tipo_items = fase.tipo_items\n #obtenemos lo id y los nombres de los tipos de items del Proyecto para enviarlos como las opciones \n #disponibles\n id_tipo_items = []\n for tipo_item in tipo_items:\n id_tipo_items.append((tipo_item.id_tipo_item,tipo_item.nombre))\n\n if len(args) > 0:\n kw['id_fase_fk'] = args[0]\n\n tmpl_context.widget = self.new_form\n retorno = dict(value=kw, model=self.model.__name__)\n retorno[\"id_tipo_items\"] = id_tipo_items\n\n return retorno", "def createAction(self):\n self.createProjectAction = QtGui.QAction(self.tr(\"&New Project\"), self)\n self.createProjectAction.setShortcut(QtGui.QKeySequence.New)\n self.createProjectAction.setStatusTip(self.tr(\"Create a new project\"))\n self.connect(self.createProjectAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"newProject()\"))\n\n self.openProjectAction = QtGui.QAction(self.tr(\"&Open...\"), self)\n self.openProjectAction.setShortcut(QtGui.QKeySequence.Open)\n self.openProjectAction.setStatusTip(self.tr(\"Open an existing project\"))\n self.connect(self.openProjectAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"openProject()\"))\n\n self.saveProjectAction = QtGui.QAction(self.tr(\"&Save\"), self)\n self.saveProjectAction.setShortcut(QtGui.QKeySequence.Save)\n 
self.saveProjectAction.setStatusTip(self.tr(\"Save the current project\"))\n self.connect(self.saveProjectAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"save()\"))\n\n self.importVideoAction = QtGui.QAction(self.tr(\"&Import video...\"), self)\n self.importVideoAction.setStatusTip(self.tr(\"Import a video into your project\"))\n self.connect(self.importVideoAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"importVideo()\"))\n\n self.aboutAction = QtGui.QAction(self.tr(\"&About\"), self)\n self.aboutAction.setStatusTip(self.tr(\"Show the credits and authors\"))\n self.connect(self.aboutAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"showAbout()\"))", "def create_view(request, title, modelform, **kwargs):\n instance_form = modelform(request.POST or None)\n if instance_form.is_valid():\n instance = instance_form.save(commit=False)\n for default in kwargs.keys():\n setattr(instance, default, kwargs[default])\n instance.save()\n messages.success(request, _(\"%s was created.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Create\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )", "def goto_create_course(self):\n\n self.create.click()", "def createMenuItem(parent, menu, label, status=None, handler=None, id=-1, kind=wx.ITEM_NORMAL):\n\n if not label:\n menu.AppendSeparator()\n else:\n item = menu.Append(id, label, status, kind)\n if handler:\n parent.Bind(wx.EVT_MENU, handler, item)", "def create(self, validated_data):\n # Check if user wants to add new dish at the menu creation\n # If not, return new menu without dishes\n try:\n dishes_data = validated_data.pop('dishes')\n except KeyError:\n menu = Menu.objects.create(**validated_data)\n return menu\n\n menu = Menu.objects.create(**validated_data)\n\n for dish_data in dishes_data:\n Dish.objects.create(menu=menu, **dish_data)\n\n return menu" ]
[ "0.6816038", "0.66699916", "0.65619886", "0.62834644", "0.6270374", "0.62515026", "0.61413074", "0.6103415", "0.6014899", "0.5994546", "0.5990907", "0.5984991", "0.5979235", "0.5972948", "0.5971281", "0.5969421", "0.5925181", "0.5908922", "0.5885741", "0.5874071", "0.5808455", "0.5793427", "0.5765566", "0.5750098", "0.57423615", "0.57421726", "0.5738823", "0.5711744", "0.5707728", "0.57040197" ]
0.7331614
0
Exports a zip file containing the 'studio_customization' module, gathering all customizations done with Studio (customizations of existing apps and freshly created apps).
def export(self, token): studio_module = request.env['ir.module.module'].get_studio_module() data = request.env['ir.model.data'].search([('studio', '=', True)]) content = export.generate_archive(studio_module, data) return request.make_response(content, headers=[ ('Content-Disposition', content_disposition('customizations.zip')), ('Content-Type', 'application/zip'), ('Content-Length', len(content)), ], cookies={'fileToken': token})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_configurations():\n pass", "def build_package_includes(self):\n location = self.options['location']\n sitezcml_path = os.path.join(location, 'etc', 'site.zcml')\n zcml = self.options.get('zcml')\n site_zcml = self.options.get('site-zcml')\n additional_zcml = self.options.get(\"zcml-additional\")\n resources = self.options.get(\"resources\")\n\n if site_zcml:\n open(sitezcml_path, 'w').write(site_zcml)\n return\n\n if zcml:\n zcml = zcml.split()\n\n if additional_zcml or resources or zcml:\n includes_path = os.path.join(location, 'etc', 'package-includes')\n\n if not os.path.exists(includes_path):\n # Zope 2.9 does not have a package-includes so we\n # create one.\n os.mkdir(includes_path)\n else:\n if zcml and '*' in zcml:\n zcml.remove('*')\n else:\n shutil.rmtree(includes_path)\n os.mkdir(includes_path)\n\n if additional_zcml:\n path = os.path.join(includes_path, \"999-additional-overrides.zcml\")\n open(path, \"w\").write(additional_zcml.strip())\n\n if resources:\n resources_path = resources.strip()\n path = os.path.join(includes_path, \"998-resources-configure.zcml\")\n open(path, \"w\").write(\n # TODO: what was meant to be done here?\n # XXX: resources_zcml is undefined!\n resources_zcml % dict(directory=resources_path)\n )\n\n if not os.path.exists(resources_path):\n os.mkdir(resources_path)\n\n if zcml:\n n = 0\n package_match = re.compile('\\w+([.]\\w+)*$').match\n for package in zcml:\n n += 1\n orig = package\n if ':' in package:\n package, filename = package.split(':')\n else:\n filename = None\n\n if '-' in package:\n package, suff = package.split('-')\n file_suff = suff\n if suff not in ('configure', 'meta', 'overrides'):\n file_suff = '%s-configure' % suff\n else:\n suff = file_suff = 'configure'\n\n if filename is None:\n filename = suff + '.zcml'\n\n if not package_match(package):\n raise ValueError('Invalid zcml', orig)\n\n path = os.path.join(\n includes_path,\n \"%3.3d-%s-%s.zcml\" % (n, package, file_suff),\n )\n open(path, 'w').write(\n '<include package=\"%s\" file=\"%s\" />\\n'\n % (package, filename)\n )", "def export_applications(self):\n print('\\n=== Exporting all application data...')\n\n for application in self.client.applications:\n print('- Exporting application:', application.name)\n\n json = {\n 'id': self.get_id(application),\n 'href': application.href,\n 'name': application.name,\n 'description': application.description,\n 'status': application.status,\n 'createdAt': application.created_at.isoformat(),\n 'modifiedAt': application.modified_at.isoformat(),\n 'customData': self.get_custom_data(application),\n 'default_account_store_mapping': None,\n 'default_group_store_mapping': None,\n 'account_store_mappings': [],\n #'verificationEmails': [],\n }\n\n default_account_store_mapping = application.default_account_store_mapping\n default_group_store_mapping = application.default_group_store_mapping\n\n if default_account_store_mapping:\n json['default_account_store_mapping'] = {\n 'id': application.default_account_store_mapping.href.split('/')[-1],\n 'href': application.default_account_store_mapping.href,\n 'type': application.default_account_store_mapping.account_store.__class__.__name__,\n 'name': application.default_account_store_mapping.account_store.name,\n 'list_index': application.default_account_store_mapping.list_index,\n }\n\n if default_group_store_mapping:\n json['default_group_store_mapping'] = {\n 'id': application.default_group_store_mapping.href.split('/')[-1],\n 'href': application.default_group_store_mapping.href,\n 'type': 
application.default_group_store_mapping.account_store.__class__.__name__,\n 'name': application.default_group_store_mapping.account_store.name,\n 'list_index': application.default_group_store_mapping.list_index,\n }\n\n for account_store_mapping in application.account_store_mappings:\n json['account_store_mappings'].append({\n 'id': self.get_id(account_store_mapping),\n 'href': account_store_mapping.href,\n 'account_store': {\n 'type': account_store_mapping.account_store.__class__.__name__,\n 'id': self.get_id(account_store_mapping.account_store),\n 'href': account_store_mapping.account_store.href,\n 'name': account_store_mapping.account_store.name,\n 'description': account_store_mapping.account_store.description,\n 'status': account_store_mapping.account_store.status,\n },\n 'list_index': account_store_mapping.list_index,\n 'is_default_account_store': account_store_mapping.is_default_account_store,\n 'is_default_group_store': account_store_mapping.is_default_group_store,\n })\n\n tenant = self.get_id(application.tenant)\n self.write('%s/%s/applications/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def write_less(env):\n dbname = env.cr.dbname\n addon_path = env['ir.config_parameter'].get_param(\n 'blue_custom_branding.addon_path')\n fname = \"{}/static/src/less/variables_{}.less\".format(addon_path, dbname)\n\n companies = env['res.company'].search([])\n try:\n f = open(fname, \"w\")\n for company in companies:\n less_string = \"\"\"\n @brand-primary-{database}-{company_id}: #{primary};\n @brand-success-{database}-{company_id}: #{success};\n @brand-info-{database}-{company_id}: #{info};\n @brand-warning-{database}-{company_id}: #{warning};\n @brand-danger-{database}-{company_id}: #{danger};\n @navbar-default-bg-{database}-{company_id}: @brand-primary-{database}-{company_id}; // @brand-primary\n @navbar-inverse-bg-{database}-{company_id}: @brand-info-{database}-{company_id}; // @brand-info\n @label-primary-bg-{database}-{company_id}: @brand-primary-{database}-{company_id}; // @brand-primary\n \"\"\".format(\n primary=company.theme_color_primary,\n success=company.theme_color_success,\n info=company.theme_color_info,\n warning=company.theme_color_warning,\n danger=company.theme_color_danger,\n database=dbname,\n company_id=company.id, )\n f.write(less_string)\n f.close()\n except Exception as e:\n _logger.debug('Theme error writing to file : %s' % e)", "def zipAll(app):\n context = app.context\n\n backend = app.backend\n base = backendRep(backend, \"clone\")\n org = context.org\n repo = context.repo\n relative = context.relative\n relative = prefixSlash(normpath(relative))\n version = context.version\n\n graphics = context.graphicsRelative\n graphics = prefixSlash(normpath(graphics))\n\n prov = context.provenanceSpec\n mods = prov.get(\"moduleSpecs\", [])\n\n repoDir = f\"{base}/{org}/{repo}\"\n\n dataItems = [\n (\"app\", f\"{repoDir}/{APP_APP}\"),\n (\"main data\", f\"{repoDir}{relative}/{version}\"),\n ]\n if graphics:\n dataItems.append((\"graphics\", f\"{repoDir}{graphics}\"))\n\n good = True\n\n for mod in mods:\n mbackend = mod[\"backend\"]\n if mbackend is None:\n mbackend = app.backend\n mbase = backendRep(mbackend, \"clone\")\n morg = mod[\"org\"]\n mrepo = mod[\"repo\"]\n mrelative = mod[\"relative\"]\n mrelative = prefixSlash(normpath(mrelative))\n mrepoDir = f\"{mbase}/{morg}/{mrepo}\"\n labelItems = []\n if mbase != base:\n labelItems.append(mbase)\n if morg != org:\n labelItems.append(morg)\n if mrepo != repo:\n labelItems.append(mrepo)\n if 
mrelative != relative:\n labelItems.append(mrelative)\n label = \"-\".join(labelItems)\n if mbase != base:\n good = False\n console(f\"ERROR: module {label} not on expected backend {backend}\")\n dataItems.append((f\"module {label}\", f\"{mrepoDir}{mrelative}/{version}\"))\n\n if not good:\n return\n\n destBase = f\"{DW}/{backendRep(backend, 'norm')}\"\n dest = normpath(f\"{destBase}/{org}/{repo}\")\n destFile = f\"{dest}/{APP_EXPRESS_ZIP}\"\n\n console(\"Data to be zipped:\")\n results = []\n\n for (label, path) in dataItems:\n if dirExists(path):\n (release, commit) = addCheckout(path)\n checkout = f\"({release or 'v??'} {commit[-6:] if commit else '??'})\"\n zipBase = path.removeprefix(f\"{base}/\")\n collectFiles(path, \"\", results, zipBase=zipBase)\n status = \"OK\"\n else:\n good = False\n status = \"missing\"\n checkout = \"(??)\"\n console(f\"\\t{status:<8} {label:<24} {checkout:<20}: {path}\")\n\n if not good:\n return\n\n if not dirExists(dest):\n dirMake(dest)\n console(\"Writing zip file ...\")\n with ZipFile(destFile, \"w\", **ZIP_OPTIONS) as zipFile:\n for (internalPath, path) in sorted(results):\n zipFile.write(\n path,\n arcname=internalPath,\n )\n return ux(destFile)", "def general_export(request):\n export_fields = OrderedDict(GENERAL_EXPORT_FIELDS)\n export = OpenXMLExport('Exportation')\n export.write_line(export_fields.keys(), bold=True) # Headers\n # Data\n query_keys = [f for f in export_fields.values() if f is not None]\n query = Student.objects.filter(archived=False).order_by('klass__name', 'last_name', 'first_name')\n for line in query.values(*query_keys):\n values = []\n for field in query_keys:\n if field == 'gender':\n values.append(('Madame', 'Monsieur')[line[field] == 'M'])\n elif field in ('dispense_ecg', 'dispense_eps', 'soutien_dys'):\n values.append('Oui' if line[field] is True else '')\n else:\n values.append(line[field])\n export.write_line(values)\n\n return export.get_http_response('general_export')", "def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()", "def custom_package_xml_generator(directory, packagename=None, version='45.0', filename='package.xml'):\n\n METADATA_TYPE = {\n 'applications':'CustomApplication', 'aura':'AuraDefinitionBundle', 'classes':'ApexClass', 'customPermissions':'CustomPermission', \n 'flexipages':'FlexiPage', 'flows':'Flow', 'globalValueSets':'GlobalValueSet', 'labels':'CustomLabels', 'layouts':'Layout',\n 'lwc': 'LightningComponentBundle', 'objects':'CustomObject', 'pages':'ApexPage', 'permissionsets':'PermissionSet', 'profiles':'Profile',\n 'staticresources':'StaticResource', 'tabs':'CustomTab', 'triggers':'ApexTrigger', 'contentassets':'ContentAsset', 'pathAssistants':'PathAssistant',\n 'quickActions':'QuickAction', 'remoteSiteSettings':'RemoteSiteSetting', 'workflows':'Workflow', 'dashboards':'Dashboard', 'reports':'Report',\n 'cspTrustedSites':'CspTrustedSite',\n }\n\n \"\"\"\n Non-implemented Metadata:\n 'ApexComponent', 'CustomMetadata' (needs custom manipulation), 'CustomObjectTranslation', 'DuplicateRule', \n 'FlowCategory', 'GlobalValueSetTranslation', 'MatchingRules',\n \"\"\"\n #read directory structure\n\n mdtypedirs 
= os.listdir(directory)\n\n nested_mdt_object = ['ValidationRule', 'CompactLayout', 'ListView', 'SharingReason', 'RecordType']\n nested_mdt_workflow = ['WorkflowFieldUpdate', 'WorkflowKnowledgePublish', 'WorkflowTask', 'WorkflowAlert', 'WorkflowSend', 'WorkflowOutboundMessage', 'WorkflowRule']\n\n # start our xml structure\n root = xml.Element('Package')\n root.set('xmlns','http://soap.sforce.com/2006/04/metadata')\n\n for mdtype in mdtypedirs:\n # create child node for each type of component\n if mdtype in METADATA_TYPE.keys():\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = str(METADATA_TYPE[mdtype])\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n \n if mdtype == 'objects':\n for nest_mdtyp in nested_mdt_object:\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = nest_mdtyp\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n\n if mdtype == 'workflows':\n for nest_mdtyp in nested_mdt_workflow:\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = nest_mdtyp\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n\n #Custom behavior for custom labels\n if mdtype == 'labels':\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = 'CustomLabel'\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n\n # add the final xml node package.api_version\n eversion = xml.SubElement(root, 'version')\n eversion.text = str(version)\n\n #package name\n if packagename != None:\n efname = xml.SubElement(root, 'fullName')\n efname.text = str(packagename)\n\n #pretty format for xml\n xmlstring = xml.tostring(root)\n reparsed = minidom.parseString(xmlstring)\n prettyxml = reparsed.toprettyxml(indent=' ', newl='\\n', encoding='UTF-8')\n \n #generate xml file from string\n try:\n with open(os.path.join(directory, filename), \"bw\") as xml_file:\n xml_file.write(prettyxml)\n except IOError:\n pass", "def exporter(self):\n\t\tscriptsPath = utils.path_join(self.dir_install, self.dir_install_name, self.version, \"scripts\")\n\t\tif self.host_os == utils.MAC:\n\t\t\tscriptsPath = utils.path_join(self.dir_install, self.dir_install_name, \"blender.app\", \"Contents\", \"Resources\", self.version, \"scripts\")\n\n\t\taddonsPath = utils.path_join(scriptsPath, \"addons\")\n\t\tstartupPath = utils.path_join(scriptsPath, \"startup\")\n\n\t\tclonePath = addonsPath if self.vb30 else startupPath\n\n\t\tsys.stdout.write(\"Adding exporter...\\n\")\n\t\tsys.stdout.write(\" in: %s\\n\" % clonePath)\n\n\t\tif not self.mode_test:\n\t\t\tif not os.path.exists(clonePath):\n\t\t\t\tsys.stderr.write(\"Something went wrong! 
Can't add Python modules and exporter!\\n\")\n\t\t\t\tsys.exit(3)\n\n\t\t\tif self.vb30:\n\t\t\t\tos.chdir(clonePath)\n\t\t\t\texporterPath = utils.path_join(clonePath, \"vb30\")\n\t\t\t\tif os.path.exists(exporterPath):\n\t\t\t\t\tutils.remove_directory(exporterPath)\n\t\t\t\tos.system(\"git clone --recursive https://github.com/bdancer/vb30.git\")\n\n\t\t\telse:\n\t\t\t\tos.chdir(clonePath)\n\t\t\t\texporterPath = utils.path_join(clonePath, \"vb25\")\n\t\t\t\tif os.path.exists(exporterPath):\n\t\t\t\t\tutils.remove_directory(exporterPath)\n\n\t\t\t\tos.system(\"git clone --recursive https://github.com/bdancer/vb25.git\")\n\n\t\t\tif self.use_exp_branch not in {'master'}:\n\t\t\t\tos.chdir(exporterPath)\n\t\t\t\tos.system(\"git remote update\")\n\t\t\t\tos.system(\"git checkout -b {branch} origin/{branch}\".format(branch=self.use_exp_branch))\n\n\t\t\tos.chdir(exporterPath)\n\t\t\tos.system(\"git submodule update --init --recursive\")\n\t\t\tos.system(\"git submodule foreach git checkout master\")\n\t\t\tos.system(\"git submodule foreach git pull --rebase origin master\")", "def exports(self):\r\n return resources.Exports(self)", "def getSetupDict( script, app_name, app_version ):\r\n \r\n CleanDir( 'build' )\r\n CleanDir( app_name )\r\n \r\n setupDict = {}\r\n\r\n manifestRes = manifest_template % dict(prog=app_name)\r\n \r\n wd = {}\r\n wd['script'] = script\r\n #wd['icon_resources'] = [(1, iconFile)]\r\n wd['other_resources'] = [(RT_MANIFEST, 1, manifestRes)]\r\n wd['description'] = \"%s application\" % app_name\r\n wd['dest_base'] = app_name\r\n wd['version'] = app_version\r\n wd['company_name'] = \"Ginstrom IT Solutions (GITS)\"\r\n wd['copyright'] = \"(C) 2006 Ginstrom IT Solutions (GITS)\"\r\n wd['name'] = \"%s Application v %s\" % (app_name,app_version)\r\n \r\n setupDict['windows'] = [wd]\r\n setupDict['zipfile'] = None\r\n setupDict['data_files'] = [(\".\", glob.glob(\"./*.txt\") + glob.glob( \"./*.db\" ) )]\r\n\r\n excludes = [\"pywin\", \"pywin.debugger\", \"pywin.debugger.dbgcon\",\r\n \"pywin.dialogs\", \"pywin.dialogs.list\", \"win32com.server\"]\r\n \r\n options = {\"optimize\":2,\r\n \"dist_dir\":app_name,\r\n \"excludes\":excludes}\r\n \r\n setupDict['options'] = {\"py2exe\":options}\r\n \r\n return setupDict", "def write_bootswatch_less(env):\n dbname = env.cr.dbname\n addon_path = env['ir.config_parameter'].get_param(\n 'blue_custom_branding.addon_path')\n fname = \"{}/static/src/less/bootswatch_{}.less\".format(addon_path, dbname)\n\n companies = env['res.company'].search([])\n try:\n f = open(fname, \"w\")\n for company in companies:\n # &#123; = { &#125; = } // They get converted back when the files are merged.\n css_string = \"\"\"\n body.blue_theme__{database}__{company_id} a.oe_menu_toggler:hover,\n body.blue_theme__{database}__{company_id} a.oe_menu_toggler:focus &#123;\n background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;\n &#125;\n\n /* main navigation bar */\n body.blue_theme__{database}__{company_id} a.oe_menu_toggler,\n body.blue_theme__{database}__{company_id} #oe_main_menu_navbar,\n body.blue_theme__{database}__{company_id} .o_main_navbar &#123;\n background-color: @brand-primary-{database}-{company_id} !important ;\n border-color: @brand-primary-{database}-{company_id};\n &#125;\n\n body.blue_theme__{database}__{company_id} a.o_menu_toggle:hover,\n body.blue_theme__{database}__{company_id} a.o_menu_toggle:focus,\n body.blue_theme__{database}__{company_id} button.o_mobile_menu_toggle:hover,\n 
body.blue_theme__{database}__{company_id} button.o_mobile_menu_toggle:focus,\n body.blue_theme__{database}__{company_id} .o_main_navbar ul.o_menu_systray li > a:hover,\n body.blue_theme__{database}__{company_id} .o_main_navbar ul.o_menu_systray li > a:focus &#123;\n background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;\n &#125;\n\n @media (min-width: @grid-float-breakpoint-max) &#123;\n body.blue_theme__{database}__{company_id} body .o_main_navbar > ul > li > a[data-toggle=\"collapse\"]:hover,\n body.blue_theme__{database}__{company_id} body .o_main_navbar > ul > li > a[data-toggle=\"collapse\"]:focus &#123;\n background-color: @brand-info-{database}-{company_id} !important;\n &#125;\n &#125;\n\n body.blue_theme__{database}__{company_id} .o_list_view tfoot &#123;\n background-color: @brand-primary-{database}-{company_id};\n &#125;\n body.blue_theme__{database}__{company_id} .o_searchview .o_searchview_facet .o_searchview_facet_label &#123;\n background-color: @brand-primary-{database}-{company_id};\n &#125;\n body.blue_theme__{database}__{company_id} .o_form_view.o_form_editable .o_form_field .o_list_view td.o_readonly &#123;\n background-color: transparent;\n &#125;\n body.blue_theme__{database}__{company_id} .navbar &#123;\n &-default &#123;\n .badge &#123;\n background-color: #fff;\n color: @navbar-default-bg-{database}-{company_id};\n &#125;\n &#125;\n &-inverse &#123;\n .badge &#123;\n background-color: #fff;\n color: @navbar-inverse-bg-{database}-{company_id};\n &#125;\n &#125;\n &#125;\n\n body.blue_theme__{database}__{company_id} .o_form_view .o_notebook > ul.nav-tabs > li.active > a,\n body.blue_theme__{database}__{company_id} .o_form_view .o_notebook > ul.nav-tabs > li.active > a:hover,\n body.blue_theme__{database}__{company_id} .o_form_view .o_notebook > ul.nav-tabs > li.active > a:focus,\n body.blue_theme__{database}__{company_id} .o_form_view .o_notebook > ul.nav-tabs > li.active > a:active &#123;\n color: @brand-primary-{database}-{company_id};\n &#125;\n\n /* For the community version */\n /* This gets the developer mode button. */\n body.blue_theme__{database}__{company_id} .label-primary:hover,\n body.blue_theme__{database}__{company_id} .label-primary:focus,\n body.blue_theme__{database}__{company_id} .label-primary &#123;\n background-color: darken(@brand-primary-{database}-{company_id}, 10%) ;\n &#125;\n\n body.blue_theme__{database}__{company_id} .o_main_navbar &#123;\n background-color: @brand-primary-{database}-{company_id};\n border-color: @brand-primary-{database}-{company_id};\n &#125;\n body.blue_theme__{database}__{company_id} .o_main_navbar button:hover,\n body.blue_theme__{database}__{company_id} .o_main_navbar button:focus &#123;\n background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;\n &#125;\n\n\n /* This picks up the menu items that are open but lost focus. 
*/\n body.blue_theme__{database}__{company_id} .o_main_navbar > li.open > a:focus,\n body.blue_theme__{database}__{company_id} .o_main_navbar > li.open > a[aria-expanded=\"true\"] &#123;\n background-color: darken(@brand-primary-{database}-{company_id}, 10%);\n &#125;\n\n /* This is the \"X\" button that closes debug mode */\n body.blue_theme__{database}__{company_id} a[data-action=\"leave_debug_mode\"]:hover &#123;\n background-color: darken(@brand-primary-{database}-{company_id}, 10%);\n &#125;\n\n @media (min-width: @grid-float-breakpoint-max) &#123;\n body.blue_theme__{database}__{company_id} .o_main_navbar > li > a.oe_menu_toggler &#123;\n background-color: @brand-primary-{database}-{company_id} !important;\n &#125;\n &#125;\n\n @media (max-width: @grid-float-breakpoint-max) &#123;\n body.blue_theme__{database}__{company_id} .o_main_navbar a:hover,\n body.blue_theme__{database}__{company_id} .o_main_navbar a:focus &#123;\n background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;\n &#125;\n &#125;\n\n @media (min-width: @grid-float-breakpoint-max) &#123;\n body.blue_theme__{database}__{company_id} .o_main_navbar > li > a.oe_menu_toggler:focus,\n body.blue_theme__{database}__{company_id} .o_main_navbar > li > a.oe_menu_toggler:active,\n body.blue_theme__{database}__{company_id} .o_main_navbar > li > a.oe_menu_toggler:hover,\n body.blue_theme__{database}__{company_id} .o_main_navbar > li > a[data-toggle=\"dropdown\"]:hover,\n body.blue_theme__{database}__{company_id} .o_main_navbar > li > a[data-toggle=\"dropdown\"]:focus,\n body.blue_theme__{database}__{company_id} .o_main_navbar > li > a[data-toggle=\"collapse\"]:hover,\n body.blue_theme__{database}__{company_id} .o_main_navbar > li > a[data-toggle=\"collapse\"]:focus,\n body.blue_theme__{database}__{company_id} .o_main_navbar > .open > a &#123;\n background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;\n &#125;\n &#125;\n\n body.blue_theme__{database}__{company_id} .o_main_navbar &#123;\n border-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;\n &#125;\n body.blue_theme__{database}__{company_id} .o_main_navbar .o_menu_brand &#123;\n border-bottom: 1px solid darken(@brand-primary-{database}-{company_id}, 10%);\n &#125;\n body.blue_theme__{database}__{company_id}.o_web_client .navbar .o_menu_toggle:hover &#123;\n background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;\n &#125;\n body.blue_theme__{database}__{company_id}.o_web_client .o_main_navbar > ul > li > a:hover,\n body.blue_theme__{database}__{company_id}.o_web_client .o_main_navbar > ul > li > a:hover,\n body.blue_theme__{database}__{company_id}.o_web_client .o_main_navbar .dropdown-toggle:hover,\n body.blue_theme__{database}__{company_id}.o_web_client .o_main_navbar .dropdown-toggle:focus &#123;\n background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;\n &#125;\n body.blue_theme__{database}__{company_id} .o_list_view tfoot &#123;\n background-color: @brand-primary-{database}-{company_id};\n &#125;\n body.blue_theme__{database}__{company_id} .o_searchview .o_searchview_facet .o_searchview_facet_label &#123;\n background-color: @brand-primary-{database}-{company_id};\n &#125;\n body.blue_theme__{database}__{company_id} .o_form_view.o_form_editable .o_form_field .o_list_view td.o_readonly &#123;\n background-color: transparent;\n &#125;\n body.blue_theme__{database}__{company_id} .navbar &#123;\n &-default &#123;\n .badge &#123;\n background-color: #fff;\n 
color: @navbar-default-bg-{database}-{company_id};\n &#125;\n &#125;\n &-inverse &#123;\n .badge &#123;\n background-color: #fff;\n color: @navbar-inverse-bg-{database}-{company_id};\n &#125;\n &#125;\n &#125;\n \"\"\".format(\n database=dbname,\n company_id=company.id)\n\n if company.override_home:\n css_string += '''\n body.blue_theme__{database}__{company_id} .o_application_switcher &#123;\n background: -webkit-gradient(linear, left top, right bottom,\n from(@brand-info-{database}-{company_id}),\n to(darken(@brand-info-{database}-{company_id}, 10%))\n );\n &#125;\n '''.format(\n database=dbname,\n company_id=company.id)\n\n f.write(css_string)\n f.close()\n except Exception as e:\n _logger.debug('Theme error writing to file : %s' % e)", "def export(\n self,\n dest_file: Optional[IO[str]] = None, *,\n inc_version: bool = True,\n minimal: bool = False,\n disp_multiblend: bool = True,\n ) -> Optional[str]:\n if dest_file is None:\n string_buf = io.StringIO()\n dest_file = string_buf\n else:\n string_buf = None\n\n if inc_version:\n # Increment this to indicate the map was modified\n self.map_ver += 1\n\n dest_file.write('versioninfo\\n{\\n')\n dest_file.write(f'\\t\"editorversion\" \"{self.hammer_ver}\"\\n')\n dest_file.write(f'\\t\"editorbuild\" \"{self.hammer_build}\"\\n')\n dest_file.write(f'\\t\"mapversion\" \"{self.map_ver}\"\\n')\n dest_file.write(f'\\t\"formatversion\" \"{self.format_ver}\"\\n')\n dest_file.write('\\t\"prefab\" \"' +\n srctools.bool_as_int(self.is_prefab) + '\"\\n}\\n')\n\n dest_file.write('visgroups\\n{\\n')\n for vis in self.vis_tree:\n vis.export(dest_file, ind='\\t')\n dest_file.write('}\\n')\n\n if not minimal:\n dest_file.write('viewsettings\\n{\\n')\n dest_file.write('\\t\"bSnapToGrid\" \"' +\n srctools.bool_as_int(self.snap_grid) + '\"\\n')\n dest_file.write('\\t\"bShowGrid\" \"' +\n srctools.bool_as_int(self.show_grid) + '\"\\n')\n dest_file.write('\\t\"bShowLogicalGrid\" \"' +\n srctools.bool_as_int(self.show_logic_grid) + '\"\\n')\n dest_file.write(f'\\t\"nGridSpacing\" \"{self.grid_spacing}\"\\n')\n dest_file.write('\\t\"bShow3DGrid\" \"' +\n srctools.bool_as_int(self.show_3d_grid) + '\"\\n}\\n')\n\n # The worldspawn version should always match the global value.\n # Also force the classname, since this will crash if it's different.\n self.spawn['mapversion'] = str(self.map_ver)\n self.spawn['classname'] = 'worldspawn'\n self.spawn.export(dest_file, disp_multiblend=disp_multiblend, _is_worldspawn=True)\n del self.spawn['mapversion']\n\n for ent in self.entities:\n ent.export(dest_file, disp_multiblend=disp_multiblend)\n\n if not minimal:\n dest_file.write('cameras\\n{\\n')\n if len(self.cameras) == 0:\n self.active_cam = -1\n dest_file.write(f'\\t\"activecamera\" \"{self.active_cam}\"\\n')\n for cam in self.cameras:\n cam.export(dest_file, '\\t')\n dest_file.write('}\\n')\n\n dest_file.write('cordons\\n{\\n')\n if len(self.cordons) > 0:\n dest_file.write('\\t\"active\" \"' +\n srctools.bool_as_int(self.cordon_enabled) +\n '\"\\n')\n for cord in self.cordons:\n cord.export(dest_file, '\\t')\n else:\n dest_file.write('\\t\"active\" \"0\"\\n')\n dest_file.write('}\\n')\n\n if self.quickhide_count > 0:\n dest_file.write(\n 'quickhide\\n'\n '{\\n'\n f'\\t\"count\" \"{self.quickhide_count}\"\\n'\n '}\\n'\n )\n\n if string_buf is not None:\n return string_buf.getvalue()\n else:\n return None", "def ExportMigrations():\n\n # Import MigrationExecutor lazily. 
MigrationExecutor checks at\n # import time that the apps are ready, and they are not when\n # django_prometheus is imported. ExportMigrations() should be\n # called in AppConfig.ready(), which signals that all apps are\n # ready.\n from django.db.migrations.executor import MigrationExecutor\n\n if \"default\" in connections and (isinstance(connections[\"default\"], DatabaseWrapper)):\n # This is the case where DATABASES = {} in the configuration,\n # i.e. the user is not using any databases. Django \"helpfully\"\n # adds a dummy database and then throws when you try to\n # actually use it. So we don't do anything, because trying to\n # export stats would crash the app on startup.\n return\n for alias in connections.databases:\n executor = MigrationExecutor(connections[alias])\n ExportMigrationsForDatabase(alias, executor)", "def export_caliper(args):\n if args.type == 'normalise':\n clarity_epp.export.caliper.samplesheet_normalise(lims, args.process_id, args.output_file)\n elif args.type == 'dilute':\n clarity_epp.export.caliper.samplesheet_dilute(lims, args.process_id, args.output_file)", "def export(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\texportresource = appfwcustomsettings()\n\t\t\t\texportresource.name = resource.name\n\t\t\t\texportresource.target = resource.target\n\t\t\t\treturn exportresource.perform_operation(client,\"export\")\n\t\texcept Exception as e :\n\t\t\traise e", "def export(self) -> Dict[str, Any]:\n return {\n \"name\": self.name,\n \"channels\": [channel for channel in self.channels],\n \"packages\": self.packages.export(),\n \"logs\": [log for log in self.logs],\n \"actions\": [action for action in self.actions],\n \"debug\": [debug for debug in self.debug],\n }", "def generate_basic_modules(template_dir=TEMPLATE_DIR, out_dir=PKG_DIR):\n print(80 * \"-\")\n print(\"Package:\", out_dir)\n\n basic_modules = [\"_init.py\",\n \"constants.py\",\n \"base_api.py\",\n \"exception.py\"]\n\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n installed = []\n for module in basic_modules:\n in_file = os.path.join(template_dir, module)\n\n if module == \"_init.py\":\n module = \"__init__.py\"\n\n out_file = os.path.join(out_dir, module)\n try:\n shutil.copy(in_file, out_file)\n except (FileNotFoundError, shutil.SameFileError) as err:\n print(err)\n installed.append(\"- \" + out_file)\n\n print(\"Basic modules:\")\n print(\"\\n\".join(installed))", "def archive_backup(self):\n\n # Archiving the Training script\n shutil.copyfile(self.script_path, self.save_path + '/0-' + os.path.basename(self.script_path))\n os.chmod(self.save_path + '/0-' + os.path.basename(self.script_path), 0o755)\n # Archiving the src folder\n pkg_path = os.path.dirname(arch_src)\n backup_path = os.path.join(self.save_path, 'src_backup')\n shutil.make_archive(backup_path, 'gztar', pkg_path)\n\n # Archiving the Environment Info\n env_info = collect_env.get_pretty_env_info()\n with open(self.save_path + '/env_info.txt', 'w') as f:\n f.write(env_info)", "def main():\n \n root = Folder(name=os.getcwd(), file='meta.json',\n collection='.github/jekyll')\n root.update()\n root.export_folders(True)", "def export_tenants(self):\n print('\\n=== Exporting all tenant data...')\n\n tenant = dict(self.client.tenant)\n\n print('- Exporting tenant:', tenant['name'])\n\n json = {\n 'id': self.get_id(tenant),\n 'href': tenant['href'],\n 'name': tenant['name'],\n 'key': tenant['key'],\n 'createdAt': tenant['created_at'].isoformat(),\n 'modifiedAt': 
tenant['modified_at'].isoformat(),\n 'customData': self.get_custom_data(tenant),\n }\n\n #for application in tenant.applications:\n\n self.write('%s/%s/meta' % (self.location, json['id']), json)\n\n print('=== Done!\\n')", "def handle_package(self, prime_dir, bases_config: BasesConfiguration):\n emit.progress(\"Creating the package itself\")\n zipname = format_charm_file_name(self.config.name, bases_config)\n zipfh = zipfile.ZipFile(zipname, \"w\", zipfile.ZIP_DEFLATED)\n for dirpath, _dirnames, filenames in os.walk(prime_dir, followlinks=True):\n dirpath = pathlib.Path(dirpath)\n for filename in filenames:\n filepath = dirpath / filename\n zipfh.write(str(filepath), str(filepath.relative_to(prime_dir)))\n\n zipfh.close()\n return zipname", "def magento_modify_composer(composer):\n\n composer['name']= \"platformsh/{0}\".format(projectName)\n composer['description']= \"Magento 2 CE(Community Edition) for Platform.sh\"\n\n composer['repositories'] = {\n \"Magento Repo Auth Required\": {\n \"type\": \"composer\",\n \"url\": \"https://repo.magento.com\"\n },\n \"ECE-Tools\": {\n \"type\": \"git\",\n \"url\": \"https://github.com/magento/ece-tools.git\"\n },\n \"Magento Cloud Components\": {\n \"type\": \"git\",\n \"url\": \"https://github.com/magento/magento-cloud-components.git\"\n },\n \"Magento Cloud Patches\": {\n \"type\": \"git\",\n \"url\": \"https://github.com/magento/magento-cloud-patches.git\"\n },\n \"Magento Quality Patches\": {\n \"type\": \"git\",\n \"url\": \"https://github.com/magento/quality-patches.git\"\n }\n }\n\n return composer", "def compress():\n run_manage_cmd('compress_assets')", "def _export_dependencies(self):\n # TODO add export of category\n attribute_binder = self.get_binder_for_model(\n 'prestashop.product.combination.option')\n option_binder = self.get_binder_for_model(\n 'prestashop.product.combination.option.value')\n for value in self.erp_record.attribute_value_ids:\n attribute_ext_id = attribute_binder.to_backend(\n value.attribute_id.id, unwrap=True)\n if not attribute_ext_id:\n ctx = self.session.context.copy()\n ctx['connector_no_export'] = True\n attribute_ext_id = self.session.pool[\n 'prestashop.product.combination.option'].create(\n self.session.cr, self.session.uid, {\n 'backend_id': self.backend_record.id,\n 'openerp_id': value.attribute_id.id}, context=ctx)\n export_record(\n self.session,\n 'prestashop.product.combination.option',\n attribute_ext_id\n )\n value_ext_id = option_binder.to_backend(value.id,\n unwrap=True)\n if not value_ext_id:\n ctx = self.session.context.copy()\n ctx['connector_no_export'] = True\n value_ext_id = self.session.pool[\n 'prestashop.product.combination.option.value'].create(\n self.session.cr, self.session.uid, {\n 'backend_id': self.backend_record.id,\n 'openerp_id': value.val_id.id,\n 'id_attribute_group': attribute_ext_id}, context=ctx)\n export_record(self.session,\n 'prestashop.product.combination.option.value',\n value_ext_id)", "def to_egg(dest_dir):\n return os.path.join('EGG-INFO', 'scripts', dest_dir)", "def write_package_scripts(self, output_dir):\n manifest_sh = os.path.join(output_dir, 'manifest.pkgs.sh')\n installed_sh = os.path.join(output_dir, 'installed.pkgs.sh')\n\n minimal_sh = os.path.join(output_dir, 'minimal.pkgs.sh')\n also_installed_sh = os.path.join(output_dir, 'also_installed.pkgs.sh')\n uninstalled_sh = os.path.join(output_dir, 'uninstalled.pkgs.sh')\n\n with open(manifest_sh, 'w') as f:\n for pkgname in self.manifest:\n print(\"manifest: %s\" % pkgname)\n f.write(\"apt-get install %s\" % 
pkgname)\n f.write(\"\\n\")\n with open(installed_sh, 'w') as f:\n for pkgname in self.manifest:\n print(\"installed: %s\" % pkgname)\n f.write(\"apt-get install %s\" % pkgname)\n f.write(\"\\n\")\n\n with open(minimal_sh, 'w') as f:\n for pkgname in self.minimal:\n print(\"min: %s\" % pkgname)\n f.write(\"apt-get install %s\" % pkgname)\n f.write(\"\\n\")\n with open(also_installed_sh, 'w') as f:\n for pkgname in self.also_installed:\n print(\"als: %s\" % pkgname)\n f.write(\"apt-get install %s\" % pkgname)\n f.write(\"\\n\")\n with open(uninstalled_sh, 'w') as f:\n for pkgname in self.uninstalled:\n print(\"uni: %s\" % pkgname)\n f.write(\"apt-get remove %s\" % pkgname)\n f.write(\"\\n\")", "def _add_scripts(prefix):\n mapping = {\"MAST_HOME\": prefix}\n if \"Windows\" in platform.system():\n script_dir = os.path.join(INSTALL_DIR, \"files\", \"windows\")\n files = [\n \"mast.bat\",\n \"mast-system.bat\",\n \"mast-accounts.bat\",\n \"mast-backups.bat\",\n \"mast-crypto.bat\",\n \"mast-deployment.bat\",\n \"mast-developer.bat\",\n \"mast-network.bat\",\n \"test-mast.bat\",\n \"mast-version.bat\",\n \"mast-web.bat\",\n \"mastd.bat\",\n \"mast-ssh.bat\",\n \"set-env.bat\",\n ]\n elif \"Linux\" in platform.system():\n script_dir = os.path.join(INSTALL_DIR, \"files\", \"linux\")\n files = [\n \"mast\",\n \"mast-system\",\n \"mast-accounts\",\n \"mast-backups\",\n \"mast-crypto\",\n \"mast-deployment\",\n \"mast-developer\",\n \"mast-network\",\n \"test-mast\",\n \"mast-version\",\n \"mast-web\",\n \"mast-ssh\",\n \"mastd\",\n \"set-env\",\n ]\n\n for f in files:\n dst = os.path.join(prefix, f)\n src = os.path.join(script_dir, f)\n print(\"{} -> {}\".format(src, dst))\n content = render_template_file(src, mapping)\n write_file(dst, content)\n if \"Linux\" in platform.system():\n os.chmod(dst, 0o755)\n\n if \"Windows\" in platform.system():\n # copy python27.dll to site-packages/win32 directory to get around\n # issue when starting mastd\n src = os.path.join(prefix, \"miniconda\", \"python27.dll\")\n dst = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"win32\",\n \"python27.dll\"\n )\n copyfile(src, dst)\n for filename in [\"pythoncom27.dll\", \"pythoncomloader27.dll\", \"pywintypes27.dll\"]:\n src = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"pywin32_system32\",\n filename,\n )\n dst = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"win32\",\n filename,\n )\n copyfile(src, dst)\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"bin\"),\n os.path.join(prefix, \"bin\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"etc\"),\n os.path.join(prefix, \"etc\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"var\"),\n os.path.join(prefix, \"var\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"usrbin\"),\n os.path.join(prefix, \"usrbin\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"tmp\"),\n os.path.join(prefix, \"tmp\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"doc\"),\n os.path.join(prefix, \"doc\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"contrib\"),\n os.path.join(prefix, \"contrib\")\n )", "def index():\r\n\r\n module_name = deployment_settings.modules[module].name_nice\r\n response.title = module_name\r\n return dict(module_name=module_name)", "def get(self) :\n self.generate('export.html', {\n 'xml' : export(),\n 'title' : \"Admin Export\"})" ]
[ "0.54642105", "0.5087354", "0.49709123", "0.49552384", "0.49165198", "0.48934224", "0.48820457", "0.487595", "0.47681433", "0.47274783", "0.47087747", "0.46943507", "0.4635901", "0.46243486", "0.4618999", "0.46112907", "0.4579613", "0.45660037", "0.4562099", "0.4551834", "0.4548826", "0.45429224", "0.45351264", "0.45301363", "0.45252913", "0.4513647", "0.45132235", "0.45130485", "0.44672826", "0.44495544" ]
0.65367186
0
Writes the name into self._nom.
def _set_nom(self, nouveau_nom): self._nom = nouveau_nom
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_nom(self):\n return self._nom", "def nome(self):\n\n return self.__nome", "def nombre(self) -> str:\n return self._nombre", "def __str__(self):\n return self.nom", "def __str__(self):\n return self.nom", "def __str__(self):\n return self.nom", "def __str__(self):\n return self.nom", "def __str__(self):\n return self.nom", "def set_nom(self, annonce):\n n= annonce.find_element_by_class_name('prdtBILTit')\n self.nom = n.text", "def get_nombre(self):\n return self.__nombre", "def nom_comp(self):\n return self._nom_comp", "def nombre(self):\n return self.persona.nombre", "def getName(self):\n return \"\"", "def set_nom_joueur(self, nom_joueur):\n\n self._nom_joueur = nom_joueur", "def set_name(self):\n if self.first_name and self.last_name:\n name_string = \"%s\" % self.first_name\n name_string += \" %s\" % self.last_name\n self.name = name_string\n\n if self.name:\n if not self.first_name and not self.last_name:\n n = HumanName(self.name)\n self.first_name = n.first\n if n.middle:\n self.first_name = n.first + \" \" + n.middle\n self.last_name = n.last\n if n.suffix:\n self.last_name = n.last + \" \" + n.suffix", "def sanitize_name(self):\n self._name = self.get_name().strip()", "def get_name(self):\n return self.normalize_name(self.name)", "def name_with_title(self):\n return \"%s %s\" % (self.title, self.name)", "def name(self):\n return f\"{self._name.replace('_', ' ')}\".title()", "def get_nombre_completo(self):\n return f\"{self.nombre} {self.apellido}\"", "def __str__(self):\n\t\treturn self.titre", "def get_name(self):\r\n return ('%s %s' % ( self.first_name, self.last_name ))", "def get_full_name(self):\r\n full_name = '%s' % (self.name)\r\n return full_name.strip()", "def name_title(self) -> str:\n return self._name_title", "def name(self):\n\n if not hasattr(self, \"_name\"):\n name = Doc.get_text(self.doc.find(\"PreferredName\", \"\"))\n self._name = name.strip()\n return self._name", "def name(self):\n return \"%s %s\" % (self.first_name, self.last_name)", "def full_name(self):\n return u\"{} {}\".format(self.pref_first_name(), self.last_name)", "def fullname(self):\n return '{} {}'.format(self.fname,self.lname)", "def get_name(self) -> str:\n return self.name + \" - \\u20ac\" + str(self.price)", "def get_name(self):\n return \"%s %s\" % (\n self.first_name,\n self.last_name\n )" ]
[ "0.77840674", "0.7456993", "0.7332514", "0.72257555", "0.72257555", "0.72257555", "0.72257555", "0.72257555", "0.7148263", "0.70737576", "0.7070422", "0.68932337", "0.68317574", "0.6826248", "0.6705802", "0.6662138", "0.6648886", "0.66418374", "0.66198313", "0.66132355", "0.6608464", "0.6608133", "0.65931183", "0.6573385", "0.65637326", "0.65341216", "0.6510465", "0.6501009", "0.649528", "0.6494364" ]
0.7776664
1
Awards XP to the character.
def gagner_xp(self, niveau=None, xp=0, retour=True): ancien_niveau = self.niveau res = Personnage.gagner_xp(self, niveau, xp, retour) importeur.hook["pnj:gagner_xp"].executer(self, niveau, xp, retour) if self.niveau > ancien_niveau: importeur.hook["pnj:gagner_niveau"].executer(self, self.niveau) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_xp(self):\n pass", "def get_X_gcn(self, p):\n\n if p == 0:\n return preprocess_feature(self.X)\n else:\n return preprocess_feature(self._get_X_rdm(p))\n\n return rdm_feature(self.X, percent=p)", "def x(self):\n pass", "def g(self):\n return 2", "def set_xpdom():\n xp = _input(\"How much xp do you have?\")\n dominion = _input(\"How much dominion do you have?\")\n return xp, dominion", "def G():\n Pz=[40]\n Pp=[1,2,1]\n return Pz, Pp", "def gxvoxe(self):\n if self._gxvoxe is None:\n self._gxvoxe = gxapi.GXVOXE.create(self.gxvox)\n return self._gxvoxe", "def getXp(self, Xs_minus, Vs_minus, As_minus):\n return Xs_minus + Vs_minus + 0.5*As_minus", "def gxpg(self):\n\n if self._pg is None:\n self._pg = self.gxvox.create_pg()\n return self._pg", "def pos_x(self, *args, **kwargs) -> Any:\n pass", "def addXp(self, amount, reason = \"\", tellUserOverride = True):\r\n debug.write(\"[SourceRPG] Handling addXp function for userid %s\" % self.userid, 1)\r\n if not amount:\r\n debug.write(\"No experience given, return early\", 1)\r\n return\r\n \r\n \"\"\" If turbo mode is on the multiply the experience gained \"\"\"\r\n if currentTurboMode:\r\n debug.write(\"Turbo mode is on, multiply experience gain\", 2)\r\n amount = int( amount * float(turboXpMultiplier))\r\n \r\n oldXp = self.player['xp']\r\n currentXp = amount + oldXp\r\n currentLevel = self.player['level']\r\n debug.write(\"OldXp: %s, currentXp: %s, currentLevel: %s\" % (oldXp, currentXp, currentLevel), 2 )\r\n \r\n amountOfLevels = 0\r\n nextLevelXp = (currentLevel - 1) * int(xpIncrement) + int(startXp)\r\n \r\n \"\"\" Ensure that multiple levels are added as one instance \"\"\"\r\n while currentXp > nextLevelXp:\r\n amountOfLevels += 1\r\n currentXp -= nextLevelXp\r\n nextLevelXp += int(xpIncrement)\r\n \r\n debug.write(\"Amount of levels gained: %s\" % amountOfLevels, 2)\r\n \r\n \"\"\" If the server owner wishes, tell them the message \"\"\"\r\n if tellUserOverride is True and int(announceXp):\r\n tokens = {}\r\n tokens['name'] = self.player.name\r\n tokens['amount'] = amount\r\n tokens['reason'] = (\" for \" + reason) if reason else \"\"\r\n tell(self.userid, 'xp gained', tokens)\r\n \r\n \"\"\" Assign the new XP value \"\"\"\r\n self.player['xp'] = currentXp\r\n \r\n \"\"\" Create an fire the gainxp event \"\"\"\r\n values = {}\r\n values[\"oldxp\"] = (\"setint\", oldXp)\r\n values[\"newxp\"] = (\"setint\", currentXp)\r\n values[\"userid\"] = (\"setint\", self.userid)\r\n values[\"levels\"] = (\"setint\", amountOfLevels)\r\n values[\"xpneeded\"] = (\"setint\", nextLevelXp)\r\n values[\"reason\"] = (\"setstring\", reason if reason else \" \")\r\n gamethread.delayed(0, fireEvent, (\"sourcerpg_gainxp\", values))\r\n \r\n debug.write(\"[SourceRPG] addXP handled\", 2)\r\n \r\n if amountOfLevels:\r\n self.addLevel( amountOfLevels )", "def x_lb(self):\n pass", "def X(self):\n return self.x\n pass", "def x ( self ) :\n return self.xvar", "def show_x(self):\n print(self.x)", "def funcG(p, x):\n A, mu, sigma, zerolev = p\n return( A * numpy.exp(-(x-mu)*(x-mu)/(2*sigma*sigma)) + zerolev )", "def g():", "def g_P10(x,alpha=1.):\n if alpha < alpha_grid_P10.min():\n alpha = alpha_grid_P10.min()\n if alpha > alpha_grid_P10.max():\n alpha = alpha_grid_P10.max()\n mu_vmax = mu_vmax_interp_P10(alpha)\n eta_vmax = eta_vmax_interp_P10(alpha)\n mu_rmax = mu_rmax_interp_P10(alpha)\n eta_rmax = eta_rmax_interp_P10(alpha)\n y = 2./(1.+x)\n return y**mu_vmax * x**eta_vmax, y**mu_rmax * x**eta_rmax", "def userToPlotX(x):\n return dislin.nxposn(x)", "def 
addExperience(self, xp):\n self.xp += xp\n if self.xp >= self.xpNeeded:\n self.LevelUpPlayer()", "def xp_per_min(self):\n return self._xp_per_min", "def Gd():\n Pz=[8]\n Pp=[1,1]\n return Pz, Pp", "def X(self):\n return self.__X", "def radiant_xp_adv(self):\n return self._get(\"radiant_xp_adv\")", "def xy(self):\n ...", "def cx():", "def gx(Xn):\n gofx = np.sqrt(2 * np.pi) / (1 + Xn**4)\n return gofx", "def test_set_xp(self):\n s = State(substance=\"water\")\n s.xp = Q_(0.28475636946248034, \"dimensionless\"), Q_(101325.0, \"Pa\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.xp[0], Q_(0.2847563694624, \"dimensionless\")) # type: ignore\n assert np.isclose(s.xp[1], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore", "def x(self):\n return self.x", "def get_ship_x(self):\n return self.x" ]
[ "0.6880218", "0.56909996", "0.56083816", "0.5560952", "0.55240375", "0.5451451", "0.5436478", "0.54063666", "0.5388223", "0.53876704", "0.5383432", "0.53684527", "0.5349016", "0.5337466", "0.5316359", "0.5309959", "0.53087145", "0.5276371", "0.5268031", "0.52416223", "0.5239382", "0.52350324", "0.5225562", "0.51956284", "0.51753634", "0.5170611", "0.5154317", "0.5153655", "0.5152278", "0.513589" ]
0.7363098
0
The character self has just killed the victim.
def tuer(self, victime): self.script["tue"].executer(personnage=victime, pnj=self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wall_time(self):", "def _self_time(self):\r\n return self.duration() - sum([child.duration() for child in self.children])", "def verif_victoire(self):\n\n if self._mot_en_cours == self._mot_a_trouver :\n return True\n else :\n return False", "def getlife(self):\n return self.vida", "def get_life(self):\r\n return self.__lives", "def __pass_time(self):\n self.hunger += 1\n self.boredom += 1", "def cool(self):\n self.t = self.t - 1", "def current_time(cls) -> float:", "def theta_v_time():\n pass", "def time(self):\r\n raise NotImplementedError", "def time(self) -> int:\n pass", "def is_vintage(self):\n return self.get_age()>=AGE", "def __init__(self) -> None:\n self.time_passed = -1.0", "def GAME_TIME_ADVANCE(dt):", "def time(self):\n raise NotImplementedError()", "def morir(self):\n self.energia = 0\n self.vivo = False", "def life_time(self) -> int:\n\n return self._life_time", "def getTime(self):\n return self.time", "def age(self, agent):\n return (self.time - agent.born)/52.0", "def get_life(self):\n return self.life", "def get_age(self):\n return Guitar.CURRENT_YEAR - self.year", "def tic(self):\n return self._timestamp", "def get_time(self) -> float:\n self.rocket.update()\n return self.rocket.time", "def realtime(self):", "def Salvage(self):\n pass", "def unaccounted_time(self):\r\n return 0 if len(self.children) == 0 else self._self_time()", "def get_eclampsia_time(self):\n # return survival time only if the patient has died\n if self.get_if_eclampsia():\n return self._eclampsiaTime\n else:\n return None", "def time_slot(self):\n pass", "def update_time(self):\n pass # Do nothing", "def tournament(self):\n pass" ]
[ "0.5980434", "0.58657086", "0.57679105", "0.5758027", "0.5736961", "0.5723264", "0.5660789", "0.5651134", "0.5624631", "0.5586687", "0.55453277", "0.55258965", "0.5510504", "0.55059576", "0.55057245", "0.54876184", "0.54848033", "0.54786915", "0.54726285", "0.5472027", "0.545086", "0.5419824", "0.54047495", "0.5390698", "0.537093", "0.5358416", "0.5355227", "0.5340359", "0.5305627", "0.52889585" ]
0.6620466
0
Returns True if the character can drown, False otherwise.
def noyable(self): return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __bool__(self):\n return self.__nonzero__()", "def __nonzero__(self):\r\n return bool(assert_(self.obj, 'not %r' % self.obj))", "def is_donor(self):\n return True", "def __bool__(self):\n return self is TRUE", "def __bool__(self):\n return not self.undefine", "def __bool__(self):\n return True if self._name is not None else False", "def __nonzero__(self):\n return True", "def __bool__(self):\n return not(self.outcome != 0 or self.filled)", "def __bool__(self):\n return self.isValid()", "def is_neutral(self, P):\n\t\treturn P.x is None", "def __bool__(self):\r\n return self.valid", "def __bool__(self):\n return self.balance > 0", "def uni(self):\n return not self.nni", "def optional(self) -> bool:\n return False", "def is_emptiable(self) -> bool:\n raise NotImplementedError()", "def return_false(self):\n return False", "def __bool__(self):\n\n return not self.is_empty()", "def __nonzero__(self):\n return self.__bool__()", "def __nonzero__(self):\n return self.__bool__()", "def is_mentor(self):\n return self.user_profile_status == self.MENTOR", "def is_null(self) -> bool:\n return self.allele1 == -1 and self.allele2 == -1", "def __bool__(self) -> bool:\n return bool(mpmath.rand() < self.p)", "def __bool__(self):\n return self.is_valid", "def __nonzero__(self):\n return not (self.year is None and\n self.month is None and\n self.day is None)", "def __bool__(self):\n return bool(self.obj)", "def est_nul(self):\n\t\tif self.__valide:\n\t\t\treturn (self.degre() == 0) and (self.valuation().est_nul())\n\t\telse:\n\t\t\treturn False", "def is_personal(self):\n return self.user_id is not None", "def has_componente(self, persona):\n return True if persona.pk in self.pks_componenti else False", "def have_mister(self):\n return bool(self.mister)", "def are_all_snp_disabled(self):\r\n for snp in self.snp:\r\n if snp.attributes.active:\r\n return False\r\n self.primers_are_useless()\r\n return True" ]
[ "0.6540811", "0.65173805", "0.6504292", "0.6462654", "0.6444503", "0.64119697", "0.635402", "0.63263655", "0.6298532", "0.6295683", "0.6285922", "0.6284034", "0.6282235", "0.6278732", "0.6273903", "0.6253428", "0.62380856", "0.6231243", "0.6231243", "0.6210731", "0.6136528", "0.6122171", "0.6113861", "0.61122435", "0.61042154", "0.60925025", "0.6084897", "0.6083034", "0.6079687", "0.6078365" ]
0.7022764
0
Accept reject sample from a probability distribution.
def accept_reject_sample(prob: Callable, n: int, limits: Space, sample_and_weights_factory: Callable = UniformSampleAndWeights, dtype=ztypes.float, prob_max: Union[None, int] = None, efficiency_estimation: float = 1.0) -> tf.Tensor: multiple_limits = limits.n_limits > 1 # if limits.n_limits == 1: # lower, upper = limits.limits # lower = ztf.convert_to_tensor(lower[0], dtype=dtype) # upper = ztf.convert_to_tensor(upper[0], dtype=dtype) sample_and_weights = sample_and_weights_factory() n = tf.to_int64(n) def enough_produced(n, sample, n_total_drawn, eff): return tf.greater(n, tf.shape(sample, out_type=tf.int64)[0]) def sample_body(n, sample, n_total_drawn=0, eff=1.0): if sample is None: n_to_produce = n else: n_to_produce = n - tf.shape(sample, out_type=tf.int64)[0] do_print = settings.get_verbosity() > 5 if do_print: print_op = tf.print("Number of samples to produce:", n_to_produce, " with efficiency ", eff) with tf.control_dependencies([print_op] if do_print else []): n_to_produce = tf.to_int64(ztf.to_real(n_to_produce) / eff * 1.01) + 100 # just to make sure # TODO: adjustable efficiency cap for memory efficiency (prevent too many samples at once produced) n_to_produce = tf.minimum(n_to_produce, tf.to_int64(5e5)) # introduce a cap to force serial rnd_sample, thresholds_unscaled, weights, weights_max, n_drawn = sample_and_weights(n_to_produce=n_to_produce, limits=limits, dtype=dtype) # if n_produced is None: # raise ShapeIncompatibleError("`sample_and_weights` has to return thresholds with a defined shape." # "Use `Tensor.set_shape()` if the automatic propagation of the shape " # "is not available.") n_total_drawn += n_drawn n_total_drawn = tf.to_int64(n_total_drawn) probabilities = prob(rnd_sample) if prob_max is None: # TODO(performance): estimate prob_max, after enough estimations -> fix it? # TODO(Mayou36): This control dependency is needed because otherwise the max won't be determined # correctly. A bug report on will be filled (WIP). # The behavior is very odd: if we do not force a kind of copy, the `reduce_max` returns # a value smaller by a factor of 1e-14 # with tf.control_dependencies([probabilities]): # UPDATE: this works now? Was it just a one-time bug? prob_max_inferred = tf.reduce_max(probabilities) else: prob_max_inferred = prob_max if weights_max is None: weights_max = tf.reduce_max(weights) * 0.99 # safety margin, also taking numericals into account weights_scaled = prob_max_inferred / weights_max * weights random_thresholds = thresholds_unscaled * weights_scaled if run.numeric_checks: assert_op = [tf.assert_greater_equal(x=weights_scaled, y=probabilities, message="Not all weights are >= probs so the sampling " "will be biased. If a custom `sample_and_weights` " "was used, make sure that either the shape of the " "custom sampler (resp. 
it's weights) overlap better " "or decrease the `max_weight`")] else: assert_op = [] with tf.control_dependencies(assert_op): take_or_not = probabilities > random_thresholds # rnd_sample = tf.expand_dims(rnd_sample, dim=0) if len(rnd_sample.shape) == 1 else rnd_sample take_or_not = take_or_not[0] if len(take_or_not.shape) == 2 else take_or_not filtered_sample = tf.boolean_mask(rnd_sample, mask=take_or_not, axis=0) if sample is None: sample = filtered_sample else: sample = tf.concat([sample, filtered_sample], axis=0) # efficiency (estimate) of how many samples we get eff = ztf.to_real(tf.shape(sample, out_type=tf.int64)[1]) / ztf.to_real(n_total_drawn) return n, sample, n_total_drawn, eff # TODO(Mayou36): refactor, remove initial call sample = tf.while_loop(cond=enough_produced, body=sample_body, # paraopt loop_vars=sample_body(n=n, sample=None, # run first once for initialization n_total_drawn=0, eff=efficiency_estimation), swap_memory=True, parallel_iterations=4, back_prop=False)[1] # backprop not needed here if multiple_limits: sample = tf.random.shuffle(sample) # to make sure, randomly remove and not biased. new_sample = sample[:n, :] # cutting away to many produced # TODO(Mayou36): uncomment below. Why was set_shape needed? leave away to catch failure over time # if no failure, uncomment both for improvement of shape inference # with suppress(AttributeError): # if n_samples_int is not a numpy object # new_sample.set_shape((n_samples_int, n_dims)) return new_sample
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prob_choice(p):\n \n return np.random.random_sample() < p", "def AcceptOrReject(gamma):\n \n u = np.random.rand()\n if(u<gamma):\n return True\n else:\n return False", "def sample(self, probability):\n return random.uniform(0, 1) < probability", "def accept_sample(self, proposal: np.array) -> bool:\n ratio = self.objective.p(proposal) / self.objective.p(self.theta)\n if np.random.uniform() < ratio:\n return True\n return False", "def test_does_not_sample_negligible_weight_priority(self):\n s = private_sampling.ThresholdSample(\n 1.0, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 1.0 / FAILURE_PROBABILITY_INVERSE)\n self.assertEmpty(s.elements)", "def rejection_sampling(data, weights):\n weights = np.asarray(weights)\n idx = weights > np.random.uniform(0, np.max(weights), len(weights))\n logger.info(\n \"Rejection sampling resulted in {} samples ({} input)\".format(\n idx.sum(), len(idx)\n )\n )\n return data[idx]", "def reject(D, reject_prob):\n D = all_to_numpy(D)\n p_rej = all_to_numpy(reject_prob)\n N = len(D)\n assert N == len(p_rej)\n if N == 0:\n return []\n p_s = uniform(0, 1, N)\n acc_ind = p_s > reject_prob\n return D[acc_ind], acc_ind", "def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)", "def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)", "def sample_discrete(probs):\r\n q = np.random.rand()\r\n i = 0\r\n p_sum = 0.0\r\n while p_sum < q:\r\n p_sum += probs[i]\r\n i += 1\r\n return i - 1", "def sample(self, probabilities):\n return self.sample_bernoulli(probabilities)", "def probability(prob):\n return random.random() <= prob", "def rejection_sample(self, trial_count):\n count = 0\n valid_trial_count = 1\n\n for i in xrange(trial_count):\n values = {}\n\n valid_sample = True\n\n for letter in self.letters:\n prob = self.variables[letter].get_prob(values)\n values[letter] = self.sample(prob)\n\n if letter in self.query.evidence:\n if (self.query.evidence[letter] != values[letter]):\n valid_sample = False\n break\n\n if valid_sample:\n valid_trial_count += 1\n\n if values[self.query.variable]:\n count += 1\n\n return float(count) / valid_trial_count", "def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = torch.nonzero(assign_result.gt_inds == 0)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n return neg_inds\n else:\n return self.random_choice(neg_inds, num_expected)", "def to_accept(x, x_new):\n if x_new>x:\n return True\n else:\n accept=np.random.uniform(0,1)\n # Since we did a log likelihood, we need to exponentiate in order to compare to the random number\n # less likely x_new are less likely to be accepted\n return (accept < (np.exp(x_new-x)))", "def acceptance(x, x_new):\n if x_new > x:\n return True\n else:\n accept = np.random.uniform(0, 1)\n # Since we did a log likelihood, we need to exponentiate in order to compare to the random number\n # less likely x_new are less likely to be accepted\n return accept < (np.exp(x_new-x))", "def test_does_not_sample_negligible_weight_ppswor(self):\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n s.process(\n \"a\",\n math.log(\n FAILURE_PROBABILITY_INVERSE / (FAILURE_PROBABILITY_INVERSE - 1),\n math.e))\n self.assertEmpty(s.elements)", "def probability(self, samples):\n pass", "def to_accept_without_log(x, x_new):\n if x_new>x:\n return True\n else:\n accept=np.random.uniform(0,1)\n return (accept < x_new/(x+TOLERANCE))", "def 
_sample_using_random(\n self,\n p: float = 0.1,\n ):\n return sa.func.random() < p", "def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())", "def decision(self, probability):\n return random.random() < probability", "def rejection_sampling(target_density, proposal_density,\n generate_proposal_samples, envelope_factor,\n num_vars, num_samples, verbose=False,\n batch_size=None):\n if batch_size is None:\n batch_size = num_samples\n\n cntr = 0\n num_proposal_samples = 0\n samples = np.empty((num_vars, num_samples), dtype=float)\n while cntr < num_samples:\n proposal_samples = generate_proposal_samples(batch_size)\n target_density_vals = target_density(proposal_samples)\n proposal_density_vals = proposal_density(proposal_samples)\n assert target_density_vals.shape[0] == batch_size\n assert proposal_density_vals.shape[0] == batch_size\n urand = np.random.uniform(0., 1., (batch_size))\n\n # ensure envelop_factor is large enough\n if np.any(target_density_vals > (envelope_factor*proposal_density_vals)):\n I = np.argmax(\n target_density_vals/(envelope_factor*proposal_density_vals))\n msg = 'proposal_density*envelop factor does not bound target '\n msg += 'density: %f,%f' % (\n target_density_vals[I],\n (envelope_factor*proposal_density_vals)[I])\n raise ValueError(msg)\n\n I = np.where(\n urand < target_density_vals/(envelope_factor*proposal_density_vals))[0]\n\n num_batch_samples_accepted = min(I.shape[0], num_samples-cntr)\n I = I[:num_batch_samples_accepted]\n samples[:, cntr:cntr+num_batch_samples_accepted] = proposal_samples[:, I]\n cntr += num_batch_samples_accepted\n num_proposal_samples += batch_size\n\n if verbose:\n print(('num accepted', num_samples))\n print(('num rejected', num_proposal_samples-num_samples))\n print(('inverse envelope factor', 1/envelope_factor))\n print(('acceptance probability', float(\n num_samples)/float(num_proposal_samples)))\n return samples", "def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = torch.nonzero(assign_result.gt_inds == 0)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n repeat_ = num_expected // neg_inds.numel()\n return torch.cat((neg_inds.repeat(repeat_), self.random_choice(neg_inds, num_expected % neg_inds.numel())))\n else:\n return self.random_choice(neg_inds, num_expected)", "def test_rejection_sampling():\n # Check that it works with a numpy array\n original_samples = np.random.uniform(0, 10, (n_samples, n_params))\n weights = np.random.uniform(0, 5, n_samples)\n new_samples = rejection_sampling(original_samples, weights)\n # new_samples should have less samples than what we started with originally\n assert len(new_samples) <= n_samples\n # Each sample should be in the original posterior table\n assert all(new_sample in original_samples for new_sample in new_samples)\n # Each sample should be unique\n unique = np.unique(new_samples, axis=0)\n assert len(unique) == len(new_samples)\n\n # Now check that it works as expected for the\n # pesummary.utils.samples_dict.SamplesDict object\n original_samples = SamplesDict(\n {param: np.random.uniform(0, 10, n_samples) for param in gw_parameters()}\n )\n weights = np.random.uniform(0, 5, n_samples)\n new_samples = rejection_sampling(original_samples, weights)\n assert new_samples.number_of_samples <= original_samples.number_of_samples\n assert new_samples.parameters == original_samples.parameters\n assert all(\n new_sample in original_samples.samples.T for 
new_sample in\n new_samples.samples.T\n )", "def accept_reject(self, energy_new, energy_old):\n with np.errstate(invalid='ignore'):\n # The energy values being fed to Metropolis are 1-length arrays, and if\n # they are equal, their difference is 0, which gets multiplied by beta,\n # which is inf, and array([0]) * float('inf') causes\n #\n # RuntimeWarning: invalid value encountered in multiply\n #\n # Ignore this warning so so when the algorithm is on a flat plane, it always\n # accepts the step, to try to move off the plane.\n prod = -(energy_new - energy_old) * self.beta\n w = math.exp(min(0, prod))\n\n rand = self.random_gen.uniform()\n return w >= rand", "def deterministic_sample(choices, n_to_sample, p): # pylint: disable=invalid-name\n\n sample_counts = np.ceil(n_to_sample * p).astype(int)\n\n n_to_remove = np.sum(sample_counts) - n_to_sample\n\n if n_to_remove == 0:\n return choices[counts_to_vector(sample_counts)]\n\n non_zero_mask = sample_counts > 0\n\n removal_indices = np.floor(np.linspace(0.0,\n np.sum(non_zero_mask),\n n_to_remove,\n endpoint=False)).astype(int)\n\n tmp = sample_counts[non_zero_mask]\n tmp[removal_indices] = tmp[removal_indices] - 1\n\n sample_counts[non_zero_mask] = tmp\n\n assert np.sum(sample_counts) == n_to_sample\n\n samples = choices[counts_to_vector(sample_counts)]\n\n return samples", "def sample_categorical(distribution):\n sample = random.random()\n for event, prob in distribution.items():\n if sample < prob:\n return event\n sample -= prob\n raise ValueError('sum of distribution less than one')", "def random_prob(prob_thresh: float):\n seed = time.time()\n random.seed(seed)\n return prob_thresh > random.uniform(0, 1)", "def random_sample(prob):\n def _random_sample_xducer(step):\n def _random_sample_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n return step(r, x) if random() < prob else r\n return _random_sample_step\n return _random_sample_xducer" ]
[ "0.6598452", "0.6459912", "0.64536774", "0.62979674", "0.62703514", "0.62582386", "0.6248497", "0.6233459", "0.6233459", "0.6225764", "0.6088742", "0.6076565", "0.6037616", "0.60076493", "0.5997924", "0.59746337", "0.59586716", "0.5936798", "0.5931008", "0.5904506", "0.58943784", "0.5891417", "0.58595264", "0.58419067", "0.582668", "0.57885677", "0.57740146", "0.57621115", "0.5758016", "0.5749377" ]
0.667795
0
Return all extended pdfs that are daughters.
def extract_extended_pdfs(pdfs: Union[Iterable[ZfitPDF], ZfitPDF]) -> List[ZfitPDF]: from ..models.functor import BaseFunctor pdfs = convert_to_container(pdfs) indep_pdfs = [] for pdf in pdfs: if not pdf.is_extended: continue elif isinstance(pdf, BaseFunctor): if all(pdf.pdfs_extended): indep_pdfs.extend(extract_extended_pdfs(pdfs=pdf.pdfs)) elif not any(pdf.pdfs_extended): indep_pdfs.append(pdf) else: assert False, "Should not reach this point, wrong assumptions. Please report bug." else: # extended, but not a functor indep_pdfs.append(pdf) return indep_pdfs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_pdfs():\n\n return filter(lambda f: fnmatch.fnmatch(f, '*.pdf'), os.listdir(cwd))", "def get_all_pdf(soup):\n \n list_to_update = []\n report = soup.find_all('div', class_='mb-4 clearfix')\n \n for a in report[0].find_all('a', href=True):\n sub_link = a['href']\n if 'individual' in sub_link:\n if not (sub_link.startswith('http') or sub_link.startswith('www')):\n sub_link = 'https://www.transportation.gov' + sub_link\n sub_page = beautify_page(sub_link)\n\n list_to_update.append(sub_page.find(class_='file').find('a')['href'])\n \n return list_to_update", "def getPDFList(self):\n metadata = self._getMetadata()\n if not 'pdf_files' in metadata:\n metadata['pdf_files'] = PersistentDict()\n\n return metadata['pdf_files']", "def collect_documents(self):\n documents = []\n ignored = []\n for path in self.paths:\n try:\n current_document = MAE_Document(path)\n except UnsupportedMIMETypeError as e:\n ignored.append(str(e))\n else:\n documents.append(current_document)\n if ignored:\n print \"Some files were ignored:\"\n for file in ignored:\n print \"\\t%s\" % file\n return documents", "def get_all_reporters():\r\n for ep in iter_entry_points('attest.reporters'):\r\n yield ep.name", "def get_all_files(self):\n dp = FileSystemDataProvider.FileSystemDataProvider(self.folder)\n filenames = dp.getFileNames()\n htmlOut = \"available files:\"+\", \".join(filenames)\n return htmlOut", "def getExternalFiles(self):\n return []", "def finalDaughters(particle, daughters):\n if particle.numberOfDaughters()==0:\n daughters.append(particle)\n else:\n foundDaughter = False\n for i in range( particle.numberOfDaughters() ):\n dau = particle.daughter(i)\n if dau.status()>=1:\n daughters = finalDaughters( dau, daughters )\n foundDaughter = True\n if not foundDaughter:\n daughters.append(particle)\n return daughters", "def get_filenames():\n filenames = []\n for filename in Path('.').glob('*.pdf'):\n if 'reordered' not in filename.stem:\n filenames.append(filename)\n\n return filenames", "def get_full_df(self):\n\n galaxies = []\n for i, gal_name in enumerate(self.filenames):\n g_df = self.galaxies[gal_name].all_particle_properties(\n ).to_pandas()\n g_df['name'] = self.names[i]\n g_df['snap'] = self.snaps[i]\n galaxies.append(g_df)\n return pd.concat(galaxies)", "def getAllDSP (self, inDEV):\n result = []\n def filterDSP (list, dirname, names):\n for name in names:\n if name [-4:] == '.dsp':\n fullpath = os.path.join (dirname, name)\n list.append (fullpath)\n os.path.walk (inDEV, filterDSP, result)\n result = filter (self.isValidPattern, result)\n return result", "def get_files(self):\n return self.ebook_file.get_files()", "def listPrinters(self):\n raise NotImplementedError(\"listPrinters not implemented\")", "def get_all_desserts():\n return get_data_from_category_name(\"Desserts\")", "def findhtml(pathused,ticker,typ):\n\n allfiles = [] # initializing the return list\n pathused += \"/\"+ticker.upper()+\"/\"+typ # since SEC_edgar has a standard way to store files as its the Ticker and inside \n # sec-edgar-filings ==> AAPL ==> 10-K \n \n for r,d,f in os.walk(pathused): # os.walk will return all the files inside the directory (with absolute path)\n # r is the absolute path\n # f is list of files in the folders\n \n if 'filing-details.html' in f: # if filing.html (SEC-edgar convention to name html files) is in this folder \n pathfol = r.replace(\"\\\\\",\"/\") # we modify it \n allfiles.append(pathfol+'/filing-details.html') # we append the absolute path\n else:\n continue\n return allfiles #and return 
it", "def derivations(self):\n self.logger.debug(\"In derivations().\")\n\n from cutlass.HostWgsRawSeqSet import HostWgsRawSeqSet\n from cutlass.HostTranscriptomicsRawSeqSet import HostTranscriptomicsRawSeqSet\n\n for doc in self._derived_docs():\n if doc['node_type'] == \"host_transcriptomics_raw_seq_set\":\n yield HostTranscriptomicsRawSeqSet.load_host_transcriptomics_raw_seq_set(doc)\n elif doc['node_type'] == \"host_wgs_raw_seq_set\":\n yield HostWgsRawSeqSet.load_hostWgsRawSeqSet(doc)", "def list_report_files(self):\n\n gs = google_storage()\n cendr_bucket = gs.get_bucket(\"elegansvariation.org\")\n items = cendr_bucket.list_blobs(prefix=f\"reports/{self.gs_path}\")\n return {os.path.basename(x.name): f\"https://storage.googleapis.com/elegansvariation.org/{x.name}\" for x in items}", "def cleanPDFs(self):\n to_delete = []\n pdfs = self.getPDFList()\n now = datetime.now()\n\n # First we compute the list of files to delete.\n for filename in pdfs:\n filedate = pdfs[filename]\n delta = now - filedate\n\n if delta.seconds > 7200:\n to_delete.append(filename)\n\n existing_files = os.listdir(self.tempdir)\n for filename in to_delete:\n del pdfs[filename]\n if filename in existing_files:\n os.remove('%s/%s' % (self.tempdir,\n filename))\n self.setPDFList(pdfs)\n metadata = self._getMetadata()\n metadata['last_clean'] = now", "def get_PDF_links(soup):\n pdf_links = soup.find(\"div\", class_=\"calendar_panel time_table\").find_all(\"a\", class_=\"lnk\")\n pdf_links_parsed = []\n for p in pdf_links:\n if re.search(\"http.+\\.pdf\", p[\"onclick\"]):\n pdf_links_parsed.append(re.search(\"http.+\\.pdf\", p[\"onclick\"]).group(0))\n\n return pdf_links_parsed", "def getAllFiles(self):\n\n\t\treturn self.getFilesForDirs([])", "def _all_donors(self, include_background=True):\n sheets = self.shortcut_sheets\n if not include_background:\n sheets = filter(is_not_background, sheets)\n for sheet in sheets:\n for entity in sheet.bio_entities.values():\n yield entity", "def datapackager(dfiles):\n return core.annual_resource_datapackager(eia923_raw, dfiles)", "def extended_sampling(pdfs: Union[Iterable[ZfitPDF], ZfitPDF], limits: Space) -> tf.Tensor:\n samples = []\n pdfs = convert_to_container(pdfs)\n pdfs = extract_extended_pdfs(pdfs)\n\n for pdf in pdfs:\n n = tf.random.poisson(lam=pdf.get_yield(), shape=(), dtype=ztypes.float)\n sample = pdf._single_hook_sample(limits=limits, n=n, name=\"extended_sampling\")\n # sample.set_shape((n, limits.n_obs))\n samples.append(sample)\n\n samples = tf.concat(samples, axis=0)\n return samples", "def get_extra_assets(self):\n asset_list = []\n if self.extra_assets is None:\n return []\n return [ self.complete_static_filename(asset) \\\n for asset in self.extra_assets ]", "def list_pdf_paths(pdf_folder: str):\n return glob(op.join(pdf_folder, \"*\", \"*\", \"*.pdf\"))", "def yield_pdfs(DATA_PDFS_DIR, websites):\n for name, url in websites:\n filenames = set()\n path_to_pdfs = os.path.join(DATA_PDFS_DIR, name)\n for dirpath, dirnames, filenames in os.walk(path_to_pdfs):\n for filename in filenames:\n \n # If it's not a PDF, continue\n if not filename[-4:] == '.pdf':\n continue\n try:\n print(filename)\n # There might be duplicates in the log, so add to a set\n # and yield form this set later on. 
This saves times when\n # the log has multiple entries for the same file.\n filenames.add(filename) \n except:\n pass\n \n for filename in filenames:\n yield (name, filename)", "def all_pdf_files_in_directory(path):\n return sorted([filename for filename in os.listdir(path) if pdf_file(filename)])", "def filter_fontfiles(self, filenames, d=dict()):\n for f in filenames:\n n, ext = os.path.splitext(f)\n # skip for the files that are not supported\n if not ext in SUPPORTED_FONTS: continue\n\n d[n] = d[n] + [ext] if d.get(n) else [ext]\n return d", "def documents():\n for domain in os.listdir(DOCUMENT_FOLDER):\n for docname in os.listdir(os.path.join(DOCUMENT_FOLDER, domain)):\n filename = os.path.join(DOCUMENT_FOLDER, domain, docname)\n if filename.endswith(\".html\"):\n fullDocname = os.path.join(domain, docname)\n yield (fullDocname, filename)", "def get_fonts(self):\n\n font_path = self.execute_shell([\"figlet\", \"-I2\"])\n\n # get the font files installed in font_path,\n # and clean them up for printing\n fonts = [os.path.split(x)[1].split(\".\")[0] \\\n for x in self.execute_shell([\"find\",\n font_path, \"-iname\", \"*.flf\"]).split(\"\\n\")]\n\n return fonts" ]
[ "0.61845917", "0.54050064", "0.5381452", "0.5359397", "0.5254018", "0.5156865", "0.5090544", "0.50889254", "0.50726396", "0.49876747", "0.49808538", "0.49773595", "0.49442625", "0.49054474", "0.48585248", "0.4848331", "0.48360482", "0.48286647", "0.48228446", "0.47927624", "0.4777127", "0.47770035", "0.47754928", "0.4774502", "0.47658756", "0.4754261", "0.473249", "0.47314832", "0.47284603", "0.4724738" ]
0.6680698
0
Create a sample from extended pdfs by sampling poissonian using the yield.
def extended_sampling(pdfs: Union[Iterable[ZfitPDF], ZfitPDF], limits: Space) -> tf.Tensor: samples = [] pdfs = convert_to_container(pdfs) pdfs = extract_extended_pdfs(pdfs) for pdf in pdfs: n = tf.random.poisson(lam=pdf.get_yield(), shape=(), dtype=ztypes.float) sample = pdf._single_hook_sample(limits=limits, n=n, name="extended_sampling") # sample.set_shape((n, limits.n_obs)) samples.append(sample) samples = tf.concat(samples, axis=0) return samples
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rfd_poisson(ps,n):\n lam = sum(ps)\n G = len(ps)\n sample_q = lambda:nprandom.poisson(lam) # chromosomal occupancy approximately poisson.\n sampler = make_sampler(ps)\n return [direct_sampling_ps(ps,sample_q(),sampler) for i in xrange(n)]", "def convert_to_poisson(dp):\n return np.random.poisson(dp)", "def poisson_example(en, small2):\n from scipy.stats import poisson\n\n B = 1 << small2\n z = np.zeros(B); z[1] = 1\n wrap = irfft(np.exp(en * (rfft(z) - 1)))\n k = en // B + 1\n xs = k * B - (B >> 1) + np.arange(B)\n pmf = roll(wrap, B >> 1)\n df = pd.DataFrame({'x': xs, 'FFT pmf': pmf})\n po = poisson(en)\n df['Exact pmf'] = po.pmf(df.x)\n df = df.set_index('x', drop=True)\n fig, [ax0, ax1] = plt.subplots(1, 2, figsize=(FIG_W * 2, FIG_H + 0.3), constrained_layout=True)\n ax0.plot(wrap);\n ax0.set(title=f'Raw FFT-based output, wrapped to [0, {B}]',\n xlabel=f'Wrapped outcome, n mod {B}',\n ylabel='Probability mass, Pr(N=n)');\n df[['FFT pmf', 'Exact pmf']].plot(style=['-', ':'], ax=ax1, logy=True,\n title='Shifted FFT vs exact Poisson probabilities\\n(log scale)',\n xlabel='Outcome, n', ylabel='Probability mass, Pr(N=n)');\n ax1.set(ylim=[1e-17, 2 * df['FFT pmf'].max()])\n ax1.yaxis.set_minor_locator(ticker.LogLocator(subs='all'))", "def homogenous_poisson_gen():\n pass", "def inhomogenous_poisson_gen():\n pass", "def sample_pagerank(corpus, damping_factor, n):\n all_pages = []\n first_sample_prob = random.randint(0, len(corpus) - 1)\n distribution_count = dict()\n\n for u in corpus:\n distribution_count[u] = 0\n all_pages.append(u)\n\n sample = all_pages[first_sample_prob]\n for i in range(n - 1): # n - 1 because first sample was already calculated\n selection_bucket = dict()\n selection_start = 0.0\n sample_distribution = transition_model(corpus, sample, damping_factor)\n sample_prob = random.random()\n for u in sample_distribution:\n floor = selection_start\n ceiling = selection_start + sample_distribution[u]\n selection_start = ceiling\n selection_bucket[u] = [floor, ceiling]\n for u in selection_bucket:\n v = selection_bucket[u]\n if v[0] < sample_prob < v[1]:\n sample = u\n distribution_count[u] += 1\n distribution = dict()\n for u in distribution_count:\n distribution[u] = float(distribution_count[u]) / n\n\n return distribution", "def randomly_drawn_via_pdf_gen_from(self, total_samples: int):\r\n\r\n def func_gen(fit: af.Fit, total_samples: int) -> List[object]:\r\n samples = fit.value(name=\"samples\")\r\n\r\n return [\r\n self.object_via_gen_from(\r\n fit=fit,\r\n galaxies=samples.draw_randomly_via_pdf().galaxies,\r\n )\r\n for i in range(total_samples)\r\n ]\r\n\r\n func = partial(func_gen, total_samples=total_samples)\r\n\r\n return self.aggregator.map(func=func)", "def generate_samples(self):\n self.analytic_probability()", "def gen_sample ( self , nevents ) :\n if isinstance ( nevents , num_types ) and 0 < nevents :\n return poisson ( nevents )\n elif isinstance ( nevents , VE ) and \\\n ( ( 0 <= nevents.cov2 () and 0 < nevents ) or \n ( 0 < nevents.cov2 () and 0 < nevents + 3 * nevents.error() ) ) :\n for i in range ( 20000 ) :\n n = int ( ve_gauss ( nEvents ) )\n if 0 < n : return n \n else :\n self.error ( \"Can't generate positive number from %s\" % events )\n return\n \n self.error ( \"Can't generate positive number from %s/%s\" % ( events , type ( events ) ) )\n return", "def sample_pODF(nsamples,qpoints,coefs,N):\n points = np.zeros((nsamples,4))\n\n #Maximum of pODF\n C = ( (N + 1.0)**2 / (4.0 * np.pi) ) * coefs.sum()\n\n\n number_of_samples = 0\n while 
number_of_samples < nsamples:\n \n #Random sample on the sphere\n rphi = np.random.uniform( 0.0, 2.0*np.pi)\n rmu = np.random.uniform(-1.0, 1.0)\n \n rsin_theta = np.sqrt(1.0 - rmu**2)\n \n x,y,z = rsin_theta * np.cos(rphi), rsin_theta * np.sin(rphi), rmu\n\n f = np.abs(even_pODF(np.array([x,y,z]),qpoints,coefs,N))\n\n #Uniform random used for rejection\n rho = np.random.uniform(0.0, 1.0)\n \n if C*rho < f:\n #Accept random point\n points[number_of_samples,:] = np.array([x,y,z,f/C])\n number_of_samples += 1\n\n\n return points", "def _generate_distribution_samples(self, set_count, parameter_count):\n self._samples = numpy.zeros((set_count, parameter_count))\n for i, distribution in enumerate(self.parameter_distributions.values()):\n self._samples[:, i] = distribution.ppf(self._quantiles[:, i])", "def __init__(self, pdf: Union[Callable[[Union[float, np.ndarray]], Union[float, np.ndarray]], str],\n sample_size: int, seed: float = None, **kwargs):\n super().__init__()\n\n np.random.seed(seed)\n assert callable(pdf) | isinstance(pdf, str), 'Probability density function must be string or callable'\n self.pdf = pdf\n assert isinstance(sample_size, int), 'Sample size has to be specified as an integer'\n self.sample_size: int = np.random.poisson(lam=sample_size, size=1)\n self.inverse_transformation: bool = isinstance(pdf, str)\n self.r_sample: Optional[np.ndarray] = None\n self.z_sample: Optional[np.ndarray] = None\n self.kwargs: dict = kwargs\n\n if not self.inverse_transformation and (\n quad(self.pdf, 0, 1)[0] > 1.0001 or quad(self.pdf, 0, 1)[0] < 0.9999):\n warn('Supplied pdf function is not a proper pdf function as it integrates to {}, running'\n ' normalization'.format(quad(self.pdf, 0, 1)[0]), RuntimeWarning)\n normalize = quad(self.pdf, 0, 1)[0]\n pdf_tmp: Callable = self.pdf\n del self.pdf\n self.pdf = lambda x: pdf_tmp(x) / normalize\n self.pdf = np.vectorize(self.pdf)", "def custom_pdf(self, cum_probs, values):\n rnd_num = random()\n for p in range(len(cum_probs)):\n if rnd_num < cum_probs[p]:\n return values[p]", "def sample_discrete_pmf(X, PM, N):\n\n assert np.isclose(np.sum(PM), 1.0)\n assert all(0.0 <= p <= 1.0 for p in PM)\n \n y = np.zeros(N)\n cumulativePM = np.cumsum(PM) # build CDF based on PMF\n offsetRand = np.random.uniform(0, 1) * (1 / N) # offset to circumvent numerical issues with cumulativePM\n comb = np.arange(offsetRand, 1 + offsetRand, 1 / N) # new axis with N values in the range ]0,1[\n \n j = 0\n for i in range(0, N):\n while comb[i] >= cumulativePM[j]: # map the linear distributed values comb according to the CDF\n j += 1\t\n y[i] = X[j]\n \n return rd.permutation(y) # permutation of all samples", "def add_poisson_noise(self):\n\n # Can be done without loops, but this reduces memory requirements.\n for i in range(self.nintegs):\n\n ramp = deepcopy(self.data[i])\n\n # Convert up the ramp samples, to flux between reads.\n ramp[1:] = np.diff(ramp, axis=0)\n\n # Add the poisson noise.\n ramp = np.where(ramp < 0, 0, ramp) # Sanity check.\n ramp = rdm.poisson(ramp)\n\n # Convert back to up the ramp samples.\n ramp = np.cumsum(ramp, axis=0)\n\n self.data[i] = deepcopy(ramp)\n\n self.modif_str = self.modif_str + '_poisson_noise'", "def easy_sample(self, num, **kwargs):\n return self.preprocess(self.sample(num, **kwargs), **kwargs)", "def _generate_signal(self):\n x = np.arange(self.n, dtype='float')\n resample = np.random.rand(self.n) >= self.proba\n resample[0] = True # randomly initialize first sample\n x[resample] = np.random.randn(np.sum(resample))\n for i in 
x[~resample]:\n x[int(i)] = x[int(i)-1]\n return x", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def test_poisson(self):\n nt = 50\n ns = 1000\n num_giter = 5\n net = self.poisson\n\n times = []\n for i in range(ns):\n arrv = net.sample (nt)\n obs = arrv.subset (lambda a,e: a.is_last_in_queue(e), copy_evt)\n gsmp = net.gibbs_resample (arrv, 0, num_giter)\n resampled = gsmp[-1]\n evts = resampled.events_of_task (2)\n times.append (evts[0].d)\n \n exact_sample = [ numpy.random.gamma (shape=3, scale=0.5) for i in xrange (ns) ]\n times.sort()\n exact_sample.sort()\n \n print summarize(times)\n print summarize(exact_sample)\n \n netutils.check_quantiles (self, exact_sample, times, ns)", "def resampleParticles(self, gameState):\n self.particles = []\n for i in range(self.numParticles):\n self.particles.append(tuple(util.sample(self.uniformPrior) for _ in\n self.ghostIndices))", "def add_poisson_noise(self, sinogram, max_count=1000):\n\n temp = np.copy(sinogram)\n temp = temp * max_count\n temp = np.random.poisson(temp).astype('float')\n return temp / max_count", "def sample_pagerank(corpus, damping_factor, n):\n probabilities = dict()\n samples = []\n\n # Random first sample\n page = random.choice(list(corpus.keys()))\n samples.append(page)\n \n # Remaining samples after first\n for i in range(n-1):\n p = transition_model(corpus, page, damping_factor)\n page = random.choices(list(p.keys()), weights=list(p.values()), k=1)[0]\n samples.append(page)\n\n # Count\n for p in corpus.keys():\n probabilities[p] = samples.count(p) / n\n\n return probabilities", "def rfd_pois_binom(ps):\n q = inv_cdf_sample_fast(lambda k:dpois_binom(ps,k))\n return direct_sampling_ps(ps,q)", "def noise_generator(n, mean, std, fractindex):\n if fractindex not in VALID_FRACT:\n raise ValueError(\"results: status must be one of %r.\" % VALID_FRACT)\n \n stdev = std\n \n b = 2*fractindex-1\n print('beta: ', b)\n \n bdis = np.zeros(n)\n\n bdis[0] = 1\n for i in range(1,n):\n bdis[i] = bdis[i-1] * (0.5 * b + (i-1))/i # note that b is the shape parementer (b)\n\n plt.plot(bdis)\n plt.show\n\n wnt = np.random.normal(mean, stdev, size = n)\n print('WhiteNoise Stdev: ', np.std(wnt))\n plt.plot(wnt)\n plt.show()\n\n bdis_freq = np.fft.fft(bdis)\n wnt_freq = np.fft.fft(wnt)\n\n bdis_freq = bdis_freq[1:n+1]\n wnt_freq = wnt_freq[1:n+1]\n\n freq_total = bdis_freq * wnt_freq\n \n NumUniquePts = n/2 + 1\n NumUniquePts = int(NumUniquePts)\n j = np.arange(1, NumUniquePts)\n \n if fractindex > 1.0:\n j = j\n elif fractindex <= 1.0:\n j = j**0.5\n \n ft_half1 = freq_total[1:NumUniquePts]/j\n\n real = np.real(freq_total[1:NumUniquePts+1])\n real = np.flip(real, axis=0)\n\n imaginary = np.imag(freq_total[1:NumUniquePts+1])\n imaginary = np.flip(imaginary, axis=0)\n imaginary = 1j * imaginary\n\n ft_half2 = real - imaginary\n\n ft = np.hstack((ft_half1, ft_half2))\n \n x = np.fft.ifft(ft)\n x = np.real(x[:n])\n\n mean_diff = mean - np.mean(x)\n x = mean_diff + x\n print(np.mean(x))\n print(np.std(x))\n plt.plot(x)\n plt.show()\n \n return x", "def generate_in(self, n_samples, start_sample=0):\n pass", "def __resample(self):\n p_resample = []\n w_max = max(self.weights)\n index = int(round(random.uniform(0, self.n - 1)))\n beta = 0\n for i in range(self.n):\n beta += random.uniform(0, 2 * w_max)\n while self.weights[index] < beta:\n beta -= self.weights[index]\n index = (index + 1) % self.n\n p_resample.append(self.particles[index, :])\n return np.array(p_resample)", "def sample_at_data_energy(en, 
num=1):\n\n m1, m2 = dfe.loc[dfe['energy'] == en]['rho'], dfe.loc[dfe['energy'] == en]['z']\n xmin = dfe.loc[dfe['energy'] == en]['rho'].min()\n xmax = dfe.loc[dfe['energy'] == en]['rho'].max()\n ymin = dfe.loc[dfe['energy'] == en]['z'].min()\n ymax = dfe.loc[dfe['energy'] == en]['z'].max()\n\n X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j] # makes 100 by 100 grid\n positions = np.vstack([X.ravel(), Y.ravel()])\n values = np.vstack([m1, m2])\n kernel = stats.gaussian_kde(values, 0.5)\n Z = np.reshape(kernel(positions).T, X.shape)\n\n # Generate the bins for each axis\n x_bins = np.linspace(xmin, xmax, Z.shape[0]+1)\n y_bins = np.linspace(ymin, ymax, Z.shape[1]+1)\n\n # Find the middle point for each bin\n x_bin_midpoints = x_bins[:-1] + np.diff(x_bins)/2\n y_bin_midpoints = y_bins[:-1] + np.diff(y_bins)/2\n\n # Calculate the Cumulative Distribution Function(CDF)from the PDF\n cdf = np.cumsum(Z.ravel())\n cdf = cdf / cdf[-1] # Normalisation\n\n # Create random data\n values = np.random.rand(num)\n\n # Find the data position\n value_bins = np.searchsorted(cdf, values)\n x_idx, y_idx = np.unravel_index(value_bins,\n (len(x_bin_midpoints),\n len(y_bin_midpoints)))\n\n # Create the new data\n new_data = np.column_stack((x_bin_midpoints[x_idx],\n y_bin_midpoints[y_idx]))\n new_x, new_y = new_data.T\n\n return new_x, new_y", "def signal_generator(patient_generator, frame_size=2048, samples_per_patient=1):\n for _, (signal, _) in patient_generator:\n num_segments, segment_size = signal.shape\n for _ in range(samples_per_patient):\n # randomly choose a frame that lies within the segment i.e. no zero-padding is necessary\n segment_index = np.random.randint(num_segments)\n frame_start = np.random.randint(segment_size - frame_size)\n frame_end = frame_start + frame_size\n x = signal[segment_index, frame_start:frame_end]\n x = np.expand_dims(x, axis=1) # add channel dimension\n yield x", "def sample (self, n):\n y = self.bins\n x = np.r_[0, self.values.cumsum ()] / self.sum\n # interpolate inverse CDF\n out = np.interp (np.random.random (n), x, y)\n if n == 1:\n return out[0]\n else:\n return out.reshape ((n,))" ]
[ "0.5982566", "0.5851933", "0.5837932", "0.5833508", "0.5780479", "0.56297106", "0.5479673", "0.5455545", "0.5449404", "0.53448313", "0.5339353", "0.5321194", "0.5309528", "0.53024966", "0.5299598", "0.5280745", "0.5277724", "0.5273923", "0.5273923", "0.52673286", "0.52652013", "0.5257841", "0.52413136", "0.5238365", "0.522153", "0.5193093", "0.5190041", "0.5157876", "0.51571", "0.5156357" ]
0.6336474
0
Find all the subscribers in the SuggestBot database for the current language version of Wikipedia, check whether any of them are due to receive suggestions, and then post suggestions to their user talk page (or userspace subpage if one is set).
def post_suggestions(self): # today is? # Note: We use UTC as the basis for our calculations, because # the Wikipedia API also returns timestamps as UTC, thus allowing # us to correctly post suggestions to new subscribers who saw # SuggestBot post to their user talk page earlier. now = datetime.utcnow() # Query to get all regular users of the current language versions getRegularsQuery = r"""SELECT * FROM {} WHERE lang=%(lang)s AND active=1 AND retired=0""".format(config.regulars_table) # Query to update a specific user's status (to processing|idle|ready) setStatusQuery = r"""UPDATE {} SET status=%(status)s WHERE lang=%(lang)s AND username=%(username)s""".format(config.regulars_table) # Query to update a specific user's last recommendation time setLastrecQuery = r"""UPDATE {} SET last_rec=%(rectime)s WHERE lang=%(lang)s AND username=%(username)s""".format(config.regulars_table) # Query to get the time of the last suggestion posted getLastRecQuery = r"""SELECT MAX(last_rec) AS last_rec FROM {} WHERE lang=%(lang)s AND active=1""".format(config.regulars_table) # query to increment the number of recommendations count incRecCountQuery = r'''UPDATE {} SET n_recs=n_recs+1 WHERE lang=%(lang)s AND username=%(user)s'''.format(config.regulars_table) # Query to set (or reset) the busy bit in the status info table updateStatusTableQuery = r"""UPDATE {status} SET daily_running=%(status)s WHERE lang=%(lang)s""".format(status=config.status_table) # Query to check the busy bit in the status info table, so that # multiple updates don't run at the same time (otherwise we'll get # double-posts (how do we know that? we tested it!)) checkStatusTableQuery = r"""SELECT daily_running FROM {status} WHERE lang=%(lang)s""".format(status=config.status_table) # instantiate the database object, and connect myDb = db.SuggestBotDatabase() # if connection fails, fail too. if not myDb.connect(): logging.error('unable to connect to the SuggestBot database') return(False) (dbconn, dbcursor) = myDb.getConnection() # Check if a job is already running dbcursor.execute(checkStatusTableQuery, {'lang': self._lang}) row = dbcursor.fetchone() dbcursor.fetchall() # flush cursor if ord(row['daily_running']): logging.warning("SuggestBot is already posting to users on {0}wiki, exiting!".format(self._lang)) return(True) ## Instantiating bot so we can get suggestions sbot = suggestbot.SuggestBot(lang=self._lang) # Update the status of busyness to pretty busy... dbcursor.execute(updateStatusTableQuery, {'status': 1, 'lang': self._lang}) dbconn.commit() # Figure out how long since we last ran. dbcursor.execute(getLastRecQuery, {'lang': self._lang}) row = dbcursor.fetchone() dbcursor.fetchall() # flush cursor # Check that we got a row and that it's something... if row and row['last_rec']: timeSinceLastRun = now - row['last_rec'] # If tSLR.days < 0, something's not right: if timeSinceLastRun.days < 0: logging.error("Time since last set of recs posted is negative, aborting!") return(False) else: # We might see this branch the first time we're running... timeSinceLastRun = timedelta(0) # If it's more than one day since we last ran, we don't look # into the future, instead we'll just catch up. Otherwise, # we look half the distance into the future. # FIXME: this will bump people if one run runs a little long, # and the user is at the end of the next run. We should instead # store the start and end-time of the last run somewhere, perhaps # actually have a log, and then use the last start-time from the log. 
lookaheadTime = 0 if timeSinceLastRun.days == 0: lookaheadTime = timeSinceLastRun.seconds / 2 logging.info("looking {0} seconds ahead for due recs.".format(lookaheadTime)) # Store users who should get recs in this list: userQueue = list() dbcursor.execute(getRegularsQuery, {'lang': self._lang}) done = False while not done: row = dbcursor.fetchone() if not row: done = True continue # The values of the row we currently use: lastRec = row['last_rec'] period = row['period'] username = row['username'].decode('utf-8') pagetitle = row['page_title'] if pagetitle: pagetitle = pagetitle.decode('utf-8') design = row['design'] recTemplate = config.templates[self._lang]['regulars'] # If the user has chosen to use a different design from the default, # check if we have a template and possibly use that. if design: try: recTemplate = config.templates[self._lang][design] except KeyError: pass # If the user wants recs replaced, do so. replace = False if ord(row['replace_recs']): replace = True # FIXME: better to use the Subscriber object now, since it is # here and has slots for all the variables. Makes more sense. # if lastRec is None (NULL), they didn't receive any recs earlier, # which means it's definitely time to post. if not lastRec: ## print('lastRec is None/False, adding user') userQueue.append({'username': username, 'page': pagetitle, 'replace': replace, 'template': recTemplate, }) continue # Use last rec and period to check if it's time to post or not if period == 0: # Add 28 days to last rec. This is stricly not always # "once a month", but it's a lot easier than trying to # handle overflow when the last recommendation occurred near # the end of the previous month (e.g. Jan to Feb). It also # has the added feature that recommendations usually happen on # the same day of the week. modLastRec = lastRec + timedelta(days=28) else: # add 'period' days to last rec modLastRec = lastRec + timedelta(days=period) # subtract the modified last rec from today timelapse = now - modLastRec # It's time to post recommendations if we're past this user's due # date, or if it's less than lookaheadTime seconds ahead. # This makes sure that we don't always bump users to the # next day's recommendations, which would otherwise mean # we'd consistently post a day late. if timelapse.days >= 0 \ or (timelapse.days == -1 and (86400 - timelapse.seconds) < lookaheadTime): # add {'username':username, 'page':page_title} to list userQueue.append({'username': username, 'page': pagetitle, 'replace': replace, 'template': recTemplate, }) logging.info("Checked subscribers, found {n} users to post to.".format( n=len(userQueue))) # (We shuffle the user list so it doesn't necessarily get processed in # alphabetical order, IIRC the results of this SELECT is in sorted # order because we use a primary key) if len(userQueue) > 0: shuffle(userQueue) # for each user on said list... for user in userQueue: # update database to processing dbcursor.execute(setStatusQuery, {'status': 'processing', 'lang': self._lang, 'username': user['username'].encode('utf-8')}) dbconn.commit() logging.info("now getting recs for User:{username}".format( username=user['username'])) # Get recommendations and post... # Design and template is passed along based on what we looked # up earlier. 
success = sbot.recommend(username=user['username'], userGroup='suggest', filterMinor=True, filterReverts=True, page=user['page'], recTemplate=user['template'], replace=user['replace']) if success: # update database to idle, and update last_rec dbcursor.execute(setStatusQuery, {'status': 'idle', 'lang': self._lang, 'username': user['username'].encode('utf-8')}) # we don't update the rec time on a test run... if not config.testrun: # Note: we call utcnow() to store the closest last recommendation # time in the database. If some slack is needed with regards to # posting time, we can instead alter the scheduling. dbcursor.execute(setLastrecQuery, {'rectime': datetime.utcnow(), 'lang': self._lang, 'username': user['username'].encode('utf-8')}) # update count of number of recommendations for this user dbcursor.execute(incRecCountQuery, {'lang': self._lang, 'user': user['username'].encode('utf-8')}) dbconn.commit() logging.info("Posted recs to User:{username}".format( username=user['username'])) # Update the status of busyness to pretty unbusy... dbcursor.execute(updateStatusTableQuery, {'status': 0, 'lang': self._lang}) dbconn.commit() # disconnect from database myDb.disconnect() # ok, done return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instant_notification_subscribers(\n self,\n potential_subscribers = None,\n mentioned_users = None,\n exclude_list = None,\n ):\n #print '------------------'\n #print 'in content function'\n subscriber_set = set()\n #print 'potential subscribers: ', potential_subscribers\n\n #1) mention subscribers - common to questions and answers\n if mentioned_users:\n mention_subscribers = EmailFeedSetting.objects.filter_subscribers(\n potential_subscribers = mentioned_users,\n feed_type = 'm_and_c',\n frequency = 'i'\n )\n subscriber_set.update(mention_subscribers)\n\n origin_post = self.get_origin_post()\n\n #print origin_post\n\n #2) individually selected - make sure that users\n #are individual subscribers to this question\n selective_subscribers = origin_post.followed_by.all()\n #print 'question followers are ', [s for s in selective_subscribers]\n if selective_subscribers:\n selective_subscribers = EmailFeedSetting.objects.filter_subscribers(\n potential_subscribers = selective_subscribers,\n feed_type = 'q_sel',\n frequency = 'i'\n )\n subscriber_set.update(selective_subscribers)\n #print 'selective subscribers: ', selective_subscribers\n\n #3) whole forum subscribers\n global_subscribers = origin_post.get_global_instant_notification_subscribers()\n subscriber_set.update(global_subscribers)\n\n #4) question asked by me (todo: not \"edited_by_me\" ???)\n question_author = origin_post.author\n if EmailFeedSetting.objects.filter(\n subscriber = question_author,\n frequency = 'i',\n feed_type = 'q_ask'\n ):\n subscriber_set.add(question_author)\n\n #4) questions answered by me -make sure is that people \n #are authors of the answers to this question\n #todo: replace this with a query set method\n answer_authors = set()\n for answer in origin_post.answers.all():\n authors = answer.get_author_list()\n answer_authors.update(authors)\n\n if answer_authors:\n answer_subscribers = EmailFeedSetting.objects.filter_subscribers(\n potential_subscribers = answer_authors,\n frequency = 'i',\n feed_type = 'q_ans',\n )\n subscriber_set.update(answer_subscribers)\n #print 'answer subscribers: ', answer_subscribers\n\n #print 'exclude_list is ', exclude_list\n subscriber_set -= set(exclude_list)\n\n #print 'final subscriber set is ', subscriber_set\n return list(subscriber_set)", "async def suggest(self, ctx, choice=None):\n\n if choice is None or choice.lower() in (\"online\", \"voice\"):\n suggestions = get_suggestions(get_users(ctx, choice))\n\n if suggestions:\n await self.bot.say(\"You can play these games: \\n\")\n message = pagify(\"\\n\".join(suggestions), ['\\n'])\n\n for page in message:\n await self.bot.say(box(page))\n else:\n await self.bot.say(\"You have exactly **zero** games in common, go buy a 4-pack!\")\n else:\n await self.bot.say(\"Please enter a valid filter -> either use `online` (default) for all online users or `voice` for all users in a voice channel\")", "def suggestions_wikipedia(query, lang=\"fr\"):\n wikipedia.set_lang(\"fr\")\n return wikipedia.search(query, results=10)", "def build_suggesters(DomainName=None):\n pass", "def _lookup(self, search, auto_suggest=True):\n try:\n # Use the version of Wikipedia appropriate to the request language\n dict = self.translate_namedvalues(\"wikipedia_lang\")\n wiki.set_lang(dict[\"code\"])\n\n # First step is to get wiki article titles. This comes back\n # as a list. I.e. 
\"beans\" returns ['beans',\n # 'Beans, Beans the Music Fruit', 'Phaseolus vulgaris',\n # 'Baked beans', 'Navy beans']\n results = wiki.search(search, 5)\n if len(results) == 0:\n self.speak_dialog(\"no entry found\")\n return\n\n # Now request the summary for the first (best) match. Wikipedia\n # writes in inverted-pyramid style, so the first sentence is the\n # most important, the second less important, etc. Two sentences\n # is all we ever need.\n lines = 2\n summary = wiki.summary(results[0], lines,\n auto_suggest=auto_suggest)\n\n if \"==\" in summary or len(summary) > 250:\n # We hit the end of the article summary or hit a really long\n # one. Reduce to first line.\n lines = 1\n summary = wiki.summary(results[0], lines,\n auto_suggest=auto_suggest)\n\n # Now clean up the text and for speaking. Remove words between\n # parenthesis and brackets. Wikipedia often includes birthdates\n # in the article title, which breaks up the text badly.\n summary = re.sub(r'\\([^)]*\\)|/[^/]*/', '', summary)\n\n # Send to generate displays\n self.gui.clear()\n pagetext = wiki.page(results[0], auto_suggest=auto_suggest)\n self.gui['summary'] = summary\n self.gui['imgLink'] = wiki_image(pagetext)\n self.gui.show_page(\"WikipediaDelegate.qml\", override_idle=60)\n\n # Remember context and speak results\n self.set_context(\"wiki_article\", results[0])\n self.set_context(\"spoken_lines\", str(lines))\n self.speak(summary)\n self.results = results\n\n except wiki.exceptions.DisambiguationError as e:\n # Test: \"tell me about john\"\n options = e.options[:5]\n\n option_list = (\", \".join(options[:-1]) + \" \" +\n self.translate(\"or\") + \" \" + options[-1])\n choice = self.get_response('disambiguate',\n data={\"options\": option_list})\n if choice:\n self._lookup(choice, auto_suggest=auto_suggest)", "def userSuggestions(database):\n firstname=str(input(\"who do you want to have follow suggestions for :\"))\n usr,find=getByName(database,firstname)\n if not find:\n print(\"the User could not be found\")\n return\n else:\n following=[]\n followers=[]\n for folower in usr.folowed:\n followers.append(folower)\n for folowed in usr.folow:\n following.append(folowed)\n results=[]\n print(\"On what do you want your suggestions to be based on?\\n1. Mutual Interests\\n2. Mutual Connections\\n3. 
Both\")\n choice=int(input(\"Your choice :\"))\n for key ,usrs in database.items():\n if key not in following: \n correspondant=0\n if choice == 1 or choice == 3:\n for interest in usr.interest:\n if interest in usrs.interest:\n correspondant+=1\n if choice == 2 or choice == 3:\n for folower in followers:\n for folows in usrs.folowed:\n if key == folows:\n correspondant+=1\n results.append([key,correspondant])\n for i in range(len(results)):\n for j in range(0, len(results)-i-1):\n if results[j][1] > results[j+1][1] :\n results[j], results[j+1] = results[j+1], results[j]\n for k in range(5):\n print(results[k][0])", "def _make_suggestions(self):\n\n #build concordance based on current approved\n concordance = dict()\n for term in self.tree.get_children('approved'):\n words = [word.strip(',.:;*').lower() \\\n for word in str(self.tree.item(term)['values'][0]).split(' ')]\n for word in words:\n# if word == 'ad':\n# messagebox.showwarning(\"word == 'ad'\",\"concordance={}\".format(concordance))\n# pass\n if word not in ['and', 'the', 'a', 'to', 'of'] \\\n and not word.isdigit():\n if word not in concordance:\n concordance[word] = set([term, ])\n else:\n concordance[word].add(term)\n# if word == 'ad':\n# messagebox.showwarning(\"word 'ad' added?\",\"concordance={}\".format(concordance))\n# pass\n \n \n #so concordance now holds a list of words in approved terms along with\\\n #list of index of terms() they occur in\n \n for term in self.tree.get_children('suggestions'):\n self._look_in_concordance(term, concordance)\n\n for term in self.tree.get_children('unknown'):\n self._look_in_concordance(term, concordance)\n\n self._collapse_all()", "def notify_searchers(search_id):\n\n searchers = Searcher.objects.filter(search=search_id)\n\n for searcher in searchers:\n pass # Notify via email about clasification results", "def pull_suggestion(self, callback, who, arg):\n\t\t\n random_sug = self.dong.db.get_random_row('suggest')\n res = self.google_suggest(callback, who, random_sug[2], False)\n\t\t\n w = res.split()\n if w[0].lower() in ('what', 'why', 'was', 'where', 'who', 'which', 'whom', 'when', 'how', 'is', 'are', 'did'):\n if w[-1:] != '?':\n res = res + '?'\n return res.capitalize()", "def testing_suggest_an_update():\n My.search_merchant_page(driver, My.Testing_Env_EN + \"/bus/Quebec/Montreal/Chalet-Bar-B-Q/3391918\")\n suggest_an_update()\n driver.quit()", "def findSuggestions():\n users = None\n if current_user.genderPreferences == \"any\":\n users = User.query.filter(or_(User.genderPreferences==current_user.gender, User.genderPreferences=='any'), User.state==current_user.state, User.city==current_user.city, User.id!=current_user.id).all()\n elif current_user.genderPreferences == \"male\":\n users = User.query.filter(or_(User.gender==\"male\", User.gender==\"other\"), or_(User.genderPreferences==current_user.gender, User.genderPreferences==\"any\"), User.state==current_user.state, User.city==current_user.city, User.id!=current_user.id).all()\n elif current_user.genderPreferences == \"female\":\n users = User.query.filter(or_(User.gender==\"female\", User.gender==\"other\"), or_(User.genderPreferences==current_user.gender, User.genderPreferences==\"any\"), User.state==current_user.state, User.city==current_user.city, User.id!=current_user.id).all()\n show_users = []\n print(users)\n for user in users:\n if (not user in current_user.likes) and (not user in current_user.dislikes):\n show_users.append(user)\n print(show_users)\n return show_users", "def suggestions(self):\r\n return 
suggestions.ForumSuggestions(self)", "def get_suggestions():\n\n flash(\"The Recommendation feature is under construction! Please check back soon!\")\n return render_template('index.html')", "def redditLiker(driver, searchTerms):\n\n for _searchTerm in searchTerms:\n _searchString = \"https://new.reddit.com/search?q=\" + _searchTerm + \"&t=all&sort=new\"\n try:\n driver.get(_searchString)\n time.sleep(2)\n print (\"Page is ready!\")\n except TimeoutException:\n print (\"Loading took too much time!\")\n\n _subscribeButtons = driver.find_elements_by_xpath(\"/html/body/div[1]/div/div[2]/div/div/div[1]/div[2]/div/div/div/div[2]/div[2]/div[1]/div/div[2]/div/div[2]/div/div/a/div[3]/button\")\n print(len(_subscribeButtons))\n _randomNumber = random.sample(range(0, 3), 2)\n _randomNumber.sort()\n for _ran in _randomNumber:\n try:\n _subscribeButtons[_ran].click()\n time.sleep(1)\n except Exception:\n print (\"SubscribeButtons could not be scrolled into view\")\n\n _likeButtons = driver.find_elements_by_xpath('/html/body/div[1]/div/div[2]/div/div/div[1]/div/div/div/div/div[2]/div[2]/div[1]/div/div[3]/div[1]/div/div/div/div[1]/div/button[1]')\n _lmt = len(_likeButtons)\n _smallLmt = int(_lmt/3)\n _randomNumber = random.sample(range(0, _lmt), _smallLmt)\n _randomNumber.sort()\n for i in range (0, _smallLmt):\n try:\n _likeButtons[_randomNumber[i]].click()\n except Exception:\n print (\"Like could not be scrolled into view\")", "def handle_suggest():\n return 0", "def suggestion_loop(_suggestions, _suggestions_tv, _suggestions_p, _suggestions_tv_p):\n user_input = \"\"\n\n print(\"Note: All queries default to movie results.\")\n print(\"Note: Personalized suggestions doesn't include average rating when calculating suggestions.\")\n first = True\n\n while True:\n # clear terminal, but not for first loop\n if first:\n first = False\n else:\n os.system('clear')\n\n # Print options\n print(\"Options:\")\n print(\"\\t's[uggest] [tv]' \\t- preview <media_type>_{}\".format(SUGGESTIONS_PATH))\n print(\"\\t's[uggest] [tv] p[ersonalized]' \\t- preview <media_type>_{}\".format(SUGGESTIONS_PERSONALIZED_PATH))\n print(\"\\t'g[enre]' \\t- show genre options\")\n print(\"\\t'[tv] [-]<genre>, ...' 
\\t- preview <media_type>_{} filtered by genre[s]\".format(SUGGESTIONS_PATH))\n print(\"\\t'u[pdate]' \\t- update ratings.csv from imdb and generate suggestion files\")\n\n # Option suggest (preview options with vim bindings)\n if command_match(user_input, \"suggest\"):\n preview_suggestions(_suggestions)\n elif command_match(user_input, \"suggest tv\"):\n preview_suggestions(_suggestions_tv, media_type=\"tv\")\n # Option suggest personal (preview options with vim bindings)\n elif command_match(user_input, \"suggest personalized\"):\n preview_suggestions(_suggestions_p)\n elif command_match(user_input, \"suggest tv personalized\"):\n preview_suggestions(_suggestions_tv_p, media_type=\"tv\")\n\n # Option genre (print possible genres)\n elif command_match(user_input, \"genre\"):\n print(\"Genres:\")\n for g_id, g in GENRES.items():\n print(\"\\t{} - {}\".format(g, g_id))\n\n # TODO Option <genre>, filter suggestions by <genre> and preview\n # TODO filter by multiple genres\n elif user_input and is_subset([x.strip().strip('-') for x in user_input.split(\",\")], [g.lower() for g in GENRES.values()]):\n preview_suggestions(_suggestions, genres=[x.strip() for x in user_input.split(\",\")])\n elif user_input and len(user_input.split()) > 1 and command_match(user_input.split()[0], \"tv\") and is_subset([x.strip().strip('-') for x in user_input.split(\" \", 1)[1].split(\",\")], [g.lower() for g in GENRES.values()]):\n preview_suggestions(_suggestions_tv, media_type=\"tv\", genres=[x.strip() for x in user_input.split(\" \", 1)[1].split(\",\")])\n\n # Option update ratings.csv from imdb and update suggestions.txt\n elif command_match(user_input, \"update\"):\n # TODO download ratings directly from imdb\n _ratings = load_ratings()\n _suggestions, _suggestions_p = update_suggestions(_ratings, MOVIE)\n _suggestions_tv, _suggestions_tv_p = update_suggestions(_ratings, TV)\n\n elif user_input == 'q':\n sys.exit(0)\n\n elif user_input:\n print(\"Command '{}' didn't match.\".format(user_input))\n\n # Read user input\n user_input = input(\"Enter command: \")", "def suggest_an_update():\n # Locating the container\n container = My.search_presence_webelement(\n driver, By.XPATH, \"//*[@id='ypgBody']/div[3]/div/div[4]/div[2]/div[6]/ul\")\n assert container\n\n # Locating the Suggest an Update button\n suggest_an_update_button = My.search_clickable_webelement(\n container, By.XPATH, \"//*[@id='ypgBody']/div[3]/div/div[4]/div[2]/div[6]/ul/li[4]/button\")\n assert suggest_an_update_button\n suggest_an_update_button.click()\n\n # Locating the Suggest an Update popup window\n suggest_popup = My.search_presence_webelement(\n driver, By.XPATH, \"//*[@id='ypSuggestUpdateOwner']/div/div\")\n assert suggest_popup", "def youtube_fetch(self, mess, args):\n user = self.get_sender_username(mess)\n if user in self.users:\n self.log.info('%s queried %s from Youtube.' % (user, args))\n yt_service = gdata.youtube.service.YouTubeService()\n query = gdata.youtube.service.YouTubeVideoQuery()\n query.racy = 'include'\n query.orderby = 'relevance'\n query.max_results = 1\n query.vq = args\n\n feed = yt_service.YouTubeQuery(query)\n self.message_queue.append('%s searched for %s ...' %(self.users[user], args))\n\n for entry in feed.entry:\n self.message_queue.append('... 
and here you go -- %s' % entry.GetHtmlLink().href)", "def FoodSuggest(sc, event):\n db = pymysql.connect(host='localhost', user='pizzabot', db='pizzachat')\n cursor = db.cursor()\n query = 'SELECT * FROM foodlist ORDER BY RAND() LIMIT 1'\n cursor.execute(query)\n suggestion = cursor.fetchall()\n db.close()\n sc.api_call('chat.postMessage', as_user='true', channel=event['channel'],\n text='On %s, %s had: %s' % suggestion[0])", "async def Suggestion(self, ctx, *, sug:str=None):\r\n\t\tif not sug:\t\r\n\t\t\treturn await ctx.send('No Suggestions given')\r\n\r\n\t\tif \tself.settings.BotConfig('SuggestionChannel') != 0:\r\n\t\t\tch = self.bot.get_channel(self.settings.BotConfig('SuggestionChannel'))\r\n\t\t\tif ctx.author.top_role.colour:\r\n\t\t\t\tcol = ctx.author.top_role.colour\r\n\t\t\telse:\r\n\t\t\t\tcol =self.settings.randomColor()\r\n\r\n\t\t\tembed=discord.Embed(title=\"Suggestion\", description=f\"{sug}\", color=col)\r\n\t\t\tembed.set_footer(text=f\"Server: {ctx.guild} || User: {ctx.author}\")\r\n\t\t\tawait ctx.send('I have sent Suggestion')\r\n\t\t\tawait ch.send(embed=embed)\r\n\t\telse:\r\n\t\t\tawait ctx.send('No Suggestion channel found')", "def main():\n query = Query(limit=100, tag=\"utopian-io\")\n result = get_urls()\n moderators = [moderator[\"account\"] for moderator\n in constants.DB_UTEMPIAN.moderators.find()]\n for post in Discussions_by_created(query):\n steemit_url = (\n f\"{constants.STEEMIT_URL}{post.category}/{post.authorperm}\")\n if steemit_url not in result:\n\n tags = post.json_metadata[\"tags\"]\n\n # Checking if valid post\n if (len(tags) < 2 or post[\"created\"].date() < constants.THIS_WEEK):\n continue\n else:\n is_valid, category = valid_category(tags)\n if not is_valid:\n continue\n elif (category == \"translations\" and\n post.author not in constants.UTOPIAN_TRANSLATORS):\n constants.LOGGER.error(\n f\"{steemit_url} not made by accepted translator!\")\n continue\n elif (category == \"iamutopian\" and\n post.author not in moderators):\n continue\n repository = get_repository(post)\n\n # If user banned, set moderator as BANNED and score to 0\n if (post.author, \"Yes\") not in constants.BANNED_USERS:\n row = [\"\", \"\", steemit_url, repository, category]\n else:\n today = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n row = [\"BANNED\", str(today), steemit_url, repository, category,\n \"0\", \"\", \"\", \"\", \"\", 0]\n constants.LOGGER.info(\n f\"Commenting on {steemit_url} - BANNED.\")\n banned_comment(steemit_url)\n\n constants.UNREVIEWED.append_row(row)\n result = get_urls()\n constants.LOGGER.info(\n f\"{steemit_url} has tags: {tags} and was added.\")\n store_contribution(post, category)", "def _search_suggestions():\n now = time.time()\n words_q = Application.objects.values('acronym',\n 'owner', 'owner_org',\n 'nasa_off_name', 'nasa_requester',\n 'manager_app_development', 'manager_project',\n 'dev_name_primary', 'dev_name_alternate').distinct()\n wordset = set()\n for worddict in words_q:\n vals = worddict.values()\n for val in vals:\n wordset.add(val)\n words = [word for word in wordset if word]\n words.sort()\n logging.info(\"search_suggestions len=%d time=%f\" % (len(words), time.time() - now))\n return json.dumps(words)", "def send_suggestion_email(\n exploration_title, exploration_id, author_id, recipient_list):\n\n email_subject = 'New suggestion for \"%s\"' % exploration_title\n\n email_body_template = (\n 'Hi %s,<br>'\n '%s has submitted a new suggestion for your Oppia exploration, '\n '<a 
href=\"https://www.oppia.org/create/%s\">\"%s\"</a>.<br>'\n 'You can accept or reject this suggestion by visiting the '\n '<a href=\"https://www.oppia.org/create/%s#/feedback\">feedback page</a> '\n 'for your exploration.<br>'\n '<br>'\n 'Thanks!<br>'\n '- The Oppia Team<br>'\n '<br>%s')\n\n if not feconf.CAN_SEND_EMAILS:\n log_new_error('This app cannot send emails to users.')\n return\n\n if not feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS:\n log_new_error('This app cannot send feedback message emails to users.')\n return\n\n author_settings = user_services.get_user_settings(author_id)\n can_users_receive_email = (\n can_users_receive_thread_email(recipient_list, exploration_id, True))\n for index, recipient_id in enumerate(recipient_list):\n recipient_user_settings = user_services.get_user_settings(recipient_id)\n if can_users_receive_email[index]:\n # Send email only if recipient wants to receive.\n email_body = email_body_template % (\n recipient_user_settings.username, author_settings.username,\n exploration_id, exploration_title, exploration_id,\n EMAIL_FOOTER.value)\n _send_email(\n recipient_id, feconf.SYSTEM_COMMITTER_ID,\n feconf.EMAIL_INTENT_SUGGESTION_NOTIFICATION,\n email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)", "def suggestions(request):\n # Get login user profile\n u_profile = UserProfile.objects.get(user=request.user)\n # Search by user profile\n sqs = SearchQuerySet().filter(business_area=u_profile.business_area, city=u_profile.city,\n handicap_36__lte=int(u_profile.handicap_36) + 1,\n handicap_36__gte=int(u_profile.handicap_36) - 1,\n handicap_us__lte=int(u_profile.handicap_us) + 1,\n handicap_us__gte=int(u_profile.handicap_us) - 1)\n # Get user list\n queryset = User.objects.all()\n # Create result list\n results_list = []\n # Get User to list by id\n max_loop = sqs.count()\n for x in range(0, max_loop):\n user = get_object_or_404(queryset, pk=sqs[x].object.id)\n results_list.append(user)\n # Convert to serializer\n serializer = UserSerializer(results_list, many=True)\n if serializer.is_valid:\n return Response({'status': '200', 'code': 'OK_SUGGESTION',\n 'detail': serializer.data}, status=200)\n else:\n return Response({'status': '400', 'code': 'E_INVALID_PARAMETER_VALUES',\n 'detail': serializer.errors}, status=400)", "async def wikipedia(self, ctx, *args):\n if args[0] == 'random':\n search_string = wp.random()\n else:\n search_string = ' '.join(args)\n try:\n page = wp.page(search_string)\n await ctx.send(page.url)\n self.logger.info(misolog.format_log(ctx, f\"\"))\n except wp.exceptions.DisambiguationError as error:\n await ctx.send(f\"```{str(error)}```\")\n self.logger.info(misolog.format_log(ctx, f\"Disambiguation page\"))", "def suggest_completions(cls, text):\n USER_SUGGEST = 'user-suggest'\n es = UserMappingType.search().get_es()\n\n results = es.suggest(index=cls.get_index(), body={\n USER_SUGGEST: {\n 'text': text.lower(),\n 'completion': {\n 'field': 'suggest'\n }\n }\n })\n\n if results[USER_SUGGEST][0]['length'] > 0:\n return results[USER_SUGGEST][0]['options']\n\n return []", "def handle_wiki_query(self, message):\n # Talk to the user, as this can take a little time...\n search = message.data.get(\"ArticleTitle\")\n self.speak_dialog(\"searching\", {\"query\": search})\n\n try:\n self._lookup(search)\n except wiki.PageError:\n self._lookup(search, auto_suggest=False)\n except Exception as e:\n self.log.error(\"Error: {0}\".format(e))", "def search_convo_list(update, context):\n if update.callback_query:\n query = update.callback_query\n chat = 
Chat.get(query.message.chat_id)\n bot = context.bot\n else:\n chat = Chat.get(update.message.chat_id)\n\n if not chat.cart.subscriptions:\n text = '⁉️ Du har ingen gemte søgninger.'\n else:\n text = '\\n'.join([\n f'🔎 {i}. {sub.query} ({sub.price} kr.)'\n for i, sub in enumerate(chat.cart, start=1)\n ])\n text = f'Her er dine søgninger:\\n{text}'\n\n if update.callback_query:\n bot.edit_message_text(chat_id=chat.chat_id,\n message_id=query.message.message_id,\n text=text)\n else:\n update.message.reply_text(text)\n\n return ConversationHandler.END", "def _update_subscribers(self):\n try:\n campaign = self.campaigns.latest('when')\n except StudyGuideCampaign.DoesNotExist:\n pass\n else:\n for student in utils.students_for_event(self.event):\n subscriber, created = StudyGuideCampaignSubscriber.objects.get_or_create(\n campaign=campaign,\n user=student.user)\n if created: # only add if it's not there already\n campaign.subscribers.add(subscriber)", "def get_suggestions(reviewer: Any, graph: Graph, threshold: int = 10) -> list[Any]:\n reviewers_so_far = helper(reviewer, graph)\n\n sim_scores = {}\n\n for user in reviewers_so_far:\n sim_score = round(graph.get_similarity_score(user, reviewer), 2)\n\n if sim_score > 0:\n if sim_score not in sim_scores:\n sim_scores[sim_score] = [user]\n else:\n sim_scores[sim_score].append(user)\n\n recommendations_so_far = set()\n\n while len(recommendations_so_far) < threshold and len(sim_scores) > 0:\n similar_reviewers = sim_scores[max(sim_scores)]\n\n if similar_reviewers != []:\n sim_user = similar_reviewers.pop(random.randint(0, len(similar_reviewers) - 1))\n rec_movies = graph.suggest_movies(reviewer, sim_user)\n for movie in rec_movies:\n recommendations_so_far.add(movie)\n\n else:\n sim_scores.pop(max(sim_scores))\n\n recommendations = list(recommendations_so_far)\n\n while len(recommendations) > threshold:\n recommendations.pop()\n\n if len(recommendations) == 0:\n return [' recommendations not found. Try adding more movies!']\n\n else:\n return recommendations" ]
[ "0.5789807", "0.5548012", "0.5399799", "0.5373434", "0.5297016", "0.5262745", "0.5190848", "0.5156757", "0.5143706", "0.5134736", "0.51071465", "0.50839084", "0.50619185", "0.5059515", "0.49956036", "0.495569", "0.491617", "0.49068686", "0.49051824", "0.4902548", "0.48760507", "0.4874685", "0.48573795", "0.48326108", "0.47919068", "0.47834754", "0.47808653", "0.47747323", "0.4769813", "0.47636235" ]
0.7172281
0
Mapper to convert a JSON row into a row with only comments and caption in string format
def mapRow(row): commentsRow = row.comments captionRow = row.caption comments = commentsRow.data # select comments textComments = " ".join([x.text for x in comments]) # remove metadata from comments if hasattr(captionRow, "edges"): captions = captionRow.edges textCaptions = " ".join([x.node.text for x in captions]) if hasattr(captionRow, "text"): textCaptions = captionRow.text if not row.tags is None: tags = " ".join([x for x in row.tags]) else: tags = "" textComments = textComments.replace("\n", " ") textComments = textComments.replace("\t", " ") textComments = textComments.replace(",", " ") textCaptions = textCaptions.replace("\n", " ") textCaptions = textCaptions.replace("\t", " ") textCaptions = textCaptions.replace(",", " ") tags = tags.replace("\n", " ") tags = tags.replace("\t", " ") tags = tags.replace(",", " ") if len(row.urls) > 0: url = row.urls[0] else: url = "missing-url" id = row.id return pyspark.sql.Row(comments=textComments, caption=textCaptions, tags=tags, id=id, url=url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_wiki_row(row):\n out = row.copy()\n # Split comma separated fields\n out['aliases'] = row['aliases'].split(',')\n out['descriptions'] = row['descriptions'].split(',')\n # Convert JSON string to dict\n out['claims'] = json.loads(row['claims'])\n return out", "def __data_row_to_json(self, row):\n raw_data = {}\n raw_data[\"body\"] = row.body\n raw_data[\"score_hidden\"] = row.score_hidden\n raw_data[\"archived\"] = row.archived\n raw_data[\"name\"] = row.name\n raw_data[\"author\"] = row.author\n raw_data[\"author_flair_text\"] = row.author_flair_text\n raw_data[\"downs\"] = row.downs\n raw_data[\"created_utc\"] = row.created_utc\n raw_data[\"subreddit_id\"] = row.subreddit_id\n raw_data[\"link_id\"] = row.link_id\n raw_data[\"parent_id\"] = row.parent_id\n raw_data[\"score\"] = row.score\n raw_data[\"retrieved_on\"] = row.retrieved_on\n raw_data[\"controversiality\"] = row.controversiality\n raw_data[\"gilded\"] = row.gilded\n raw_data[\"id\"] = row.id\n raw_data[\"subreddit\"] = row.subreddit\n raw_data[\"ups\"] = row.ups\n raw_data[\"distinguished\"] = row.distinguished\n raw_data[\"author_flair_css_class\"] = row.author_flair_css_class\n\n return json.dumps(raw_data)", "def format(self, row):\n return json.dumps(row.print_fields)", "def conform_output_data(rowdict,fields_to_show=''):\n rowdict['TimeStamp'] = str(rowdict['TimeStamp'])\n if fields_to_show:\n rowdict= removed_fields(fields_to_show, rowdict)\n return rowdict", "def transform_json_to_dss_columns(row_obj):\n row = {}\n for keys, value in iterate_dict(row_obj):\n row[\".\".join(keys)] = value if value is not None else ''\n return row", "def unnest_json(row_obj):\n row = {}\n for keys, value in iterate_dict(row_obj):\n row[\".\".join(keys)] = value if value is not None else ''\n return row", "def format_row(self, row):\n raise NotImplementedError()", "def comment_in_json(self):\n\t\tpass", "def convert_to_json(self, rows):\n\t\tjson_list = []\n\t\tfor row in rows:\n\t\t\tjson_record = {}\n\t\t\tjson_record[\"movie_id\"] = row[0]\n\t\t\tjson_record[\"title\"] = change_title(row[1])\n\t\t\tjson_record[\"genres\"] = row[2][:5]\n\t\t\tjson_record[\"imdb_id\"] = row[3]\n\t\t\tjson_record[\"tmdb_id\"] = row[4]\n\t\t\tjson_record[\"rating\"] = row[5]\n\t\t\tjson_record[\"number_of_ratings\"] = row[6]\n\t\t\tjson_record[\"weighted_rating\"] = row[7]\n\t\t\tjson_record[\"release_year\"] = row[8]\n\t\t\tjson_record[\"img_path\"] = row[9]\n\t\t\tjson_record[\"description\"] = row[10]\n\t\t\tjson_record[\"director\"] = row[11]\n\t\t\tjson_record[\"length\"] = row[12]\n\t\t\tjson_list.append(json_record)\n\t\treturn json.dumps(json_list, indent = 4)", "def create_json_example(row, header_csv, jstruct, delimiter, keep, dic_types):\n\n for key in header_csv:\n key_struct = key.split(delimiter)\n if key in dic_types.keys():\n # if no value indicated set to default\n if row[key] == '' and 'default' in dic_types[key].keys():\n row[key] = dic_types[key]['default']\n else:\n try:\n # Cast to indicated type\n row[key] = dic_types[key]['type'](row[key]) \n except:\n print(\" [WARN] Can not parse \", row[key] , \"to type\", dic_types[key]['type'])\n jstruct.update(update_jstruct(jstruct, key_struct, row[key], keep))\n \n return jstruct", "def _convert_comment(self, data):\n\n output = {}\n output['id'] = int(data['id'])\n output['author'] = data['user_display_name']\n output['profile_url'] = data['user_url']\n output['date'] = data['comment_added_at']\n output['date_ago'] = timeago.format(self._parse_datetime(data['comment_added_at']), 
datetime.now(TIMEZONE))\n output['content'] = self.convert_content(data['html'].replace('\\n', ''))\n output['is_deletable'] = data['is_deletable']\n output['is_editable'] = data['is_editable']\n\n return output", "def transform_song(filepath):\n f = json.load(open(filepath))\n return '\\t'.join([str(v) if (v := f[k]) else ''\n for k in song_cols.keys()]) + '\\n'", "def row_to_example(self, row):\n return \" \".join([row[1], self.mention_placeholder, row[3]])", "def row_to_obj(self, row, cur):\n obj = tornado.util.ObjectDict()\n for val, desc in zip(row, cur.description):\n obj[desc.name] = val\n return obj", "def get_description_from_row(row):\n return [\n (\n name, # name\n get_type(value), # type_code\n None, # [display_size]\n None, # [internal_size]\n None, # [precision]\n None, # [scale]\n get_type(value) == Type.STRING, # [null_ok]\n )\n for name, value in row.items()\n ]", "def dict_factory(cursor, row):\n dic = {}\n for idx, col in enumerate(cursor.description):\n if isinstance(row[idx], unicode):\n dic[col[0]] = u.unicode_to_string(row[idx])\n else:\n dic[col[0]] = row[idx]\n return dic", "def __parse_json(df):\n\t\tcol_names = ['genres', 'production_companies', 'production_countries', 'cast', 'crew', 'spoken_languages',\n\t\t\t\t\t 'Keywords']\n\t\tvalue_names = ['name', 'name', 'iso_3166_1', 'name', 'name', 'name', 'name']\n\t\tfor col_name, value_name in zip(col_names, value_names):\n\t\t\t# df[col_name] = df[col_name].fillna(\"{}\")\n\t\t\tdf[col_name] = df[col_name].apply(literal_eval_error_handling)\n\t\t\tdf[col_name] = df[col_name].apply(lambda x: [i[value_name] for i in x])\n\t\treturn df", "def represent_row(self, row):\n\n # Custom Row (with the Orgs left-joined)\n organisation_id = row[\"project_activity_organisation.organisation_id\"]\n if organisation_id:\n return self.org_represent(organisation_id)\n else:\n # Fallback to name\n name = row[\"project_activity.name\"]\n if name:\n return s3_str(name)\n else:\n return current.messages[\"NONE\"]", "def represent_row(self, row, prefix=None):\n\n # Custom Row (with the Orgs left-joined)\n organisation_id = row[\"project_activity_organisation.organisation_id\"]\n if organisation_id:\n return self.org_represent(organisation_id)\n else:\n # Fallback to name\n name = row[\"project_activity.name\"]\n if name:\n return s3_str(name)\n else:\n return current.messages[\"NONE\"]", "def represent_row(self, row, prefix=None):\n\n # Custom Row (with the Orgs left-joined)\n organisation_id = row[\"project_activity_organisation.organisation_id\"]\n if organisation_id:\n return self.org_represent(organisation_id)\n else:\n # Fallback to name\n name = row[\"project_activity.name\"]\n if name:\n return s3_str(name)\n else:\n return current.messages[\"NONE\"]", "def _parse_row(self, row):\n data = {\n '_type': 'event',\n 'event_description': '',\n 'classification': COMMITTEE,\n 'all_day': False,\n 'documents': [],\n 'sources': self._parse_sources(),\n 'name': self._parse_name(row),\n 'start': self._parse_start(row),\n 'end': self._parse_end(row),\n 'location': self._parse_location(row)\n }\n data['id'] = self._generate_id(data)\n data['status'] = self._generate_status(data)\n return data", "def to_json_line(bq_row):\n row = dict()\n for key in bq_row:\n row[key] = bq_row[key]\n\n # default=str converts non JSON serializable objects to str eg datetime.datetime\n row_json = json.dumps(row, default=str)\n return row_json.encode('utf-8')", "def dict_factory(cursor, row):\n rowdict = {}\n for idx, col in enumerate(cursor.description):\n 
rowdict[col[0]] = row[idx]\n return rowdict", "def test_table_rst():\n in_json = {\n \"pandoc-api-version\": [1, 17, 5, 1],\n \"meta\": {\n \"$$references\": {\n \"t\": \"MetaMap\",\n \"c\": {\n \"tbl:id\": {\n \"t\": \"MetaMap\",\n \"c\": {\n \"type\": {\"t\": \"MetaString\", \"c\": \"Table\"},\n \"number\": {\"t\": \"MetaString\", \"c\": \"1\"},\n },\n }\n },\n }\n },\n \"blocks\": [\n {\n \"t\": \"Para\",\n \"c\": [\n {\"t\": \"Str\", \"c\": \"Some\"},\n {\"t\": \"Space\"},\n {\"t\": \"Str\", \"c\": \"text\"},\n ],\n },\n {\n \"t\": \"Div\",\n \"c\": [\n [\"tbl:id\", [\"labelled-Table\"], []],\n [\n {\n \"t\": \"Table\",\n \"c\": [\n [{\"t\": \"Str\", \"c\": \"Caption.\"}, {\"t\": \"Space\"}],\n [{\"t\": \"AlignDefault\"}, {\"t\": \"AlignDefault\"}],\n [0, 0],\n [\n [{\"t\": \"Plain\", \"c\": [{\"t\": \"Str\", \"c\": \"a\"}]}],\n [{\"t\": \"Plain\", \"c\": [{\"t\": \"Str\", \"c\": \"b\"}]}],\n ],\n [\n [\n [{\"t\": \"Plain\", \"c\": [{\"t\": \"Str\", \"c\": \"1\"}]}],\n [{\"t\": \"Plain\", \"c\": [{\"t\": \"Str\", \"c\": \"2\"}]}],\n ],\n [\n [{\"t\": \"Plain\", \"c\": [{\"t\": \"Str\", \"c\": \"4\"}]}],\n [{\"t\": \"Plain\", \"c\": [{\"t\": \"Str\", \"c\": \"5\"}]}],\n ],\n ],\n ],\n }\n ],\n ],\n },\n ],\n }\n out_string = apply_filter(\n in_json, format_label_elements.main, \"rst\", in_format=\"json\"\n )\n\n assert out_string.strip().splitlines()[0:3] == [\"Some text\", \"\", \".. _`tbl:id`:\"]", "def format_row(row):\n assert isinstance(row,list)\n \n data_row=[0]*len(header) #Formatted data row to be output and appeneded to 'data'\n \n for i in [0,1,11,13,14,15,16,17,19,20,21,28,31,45,46,47,48]: data_row[i]=row[i] #emptry string will NOT return None\n for i in [2,3,12,18]: data_row[i]=type_cast(lambda x: int(float(x)),row[i])\n for i in [6,7,8,9,10,23,24,25,26,27,29,30]: data_row[i]=type_cast(float,row[i])\n for i in [4,5,22]: data_row[i]=type_cast(datetime.strptime,row[i],'%Y-%m-%d %H:%M:%S')\n for i in range(32,45):\n if row[i]=='False': data_row[i]=False #bool('False') returns True!\n elif row[i]=='True': data_row[i]=True\n else: data_row[i]=None\n return data_row", "def build_row(raw_row):\n temp_row = dict()\n ### Plan\n # Add email addresses to row\n # If message == Clicked or message == Submitted data\n ## Append 'Time Clicked' to dict. 
Format MM/DD/YYYY | HH:mm\n ## If message == Submitted data\n ### Append Credentials Harvested: Yes to dict\n ## Else:\n ### Append Credentials Harvested: No to dict\n # Append Reported: No, Replied to Email: No, Notes: ''\n\n # Append email\n temp_row['Email Address'] = raw_row['email']\n\n if raw_row['message'] == 'Clicked Link' or raw_row['message'] == 'Submitted Data':\n # print(raw_row['time'])\n # print(arrow.get(raw_row['time'], 'YYYY-MM-DDTHH:mm:ss.SSSSSSSSS-ZZ').format('MM/DD/YYYY | HH:mm'))\n temp_row['Time Clicked'] = arrow.get(raw_row['time'], 'YYYY-MM-DDTHH:mm:ss.SSSSSSSSS-ZZ').format('MM/DD/YYYY | HH:mm')\n if raw_row['message'] == 'Submitted Data':\n temp_row['Credentials Harvested'] = 'Yes'\n else:\n temp_row['Credentials Harvested'] = 'No'\n else:\n temp_row['Time Clicked'] = 'N/A'\n temp_row['Credentials Harvested'] = 'No'\n\n temp_row.update({'Reported': '', 'Replied to Email': '', 'Notes': ''})\n return temp_row", "def _row_to_labels(row):\n labels = {}\n label_keys = ['name', 'qty', 'range_end', 'unit', 'comment']\n for key in label_keys:\n labels[key] = row[key]\n return labels", "def convert(cell):\r\n\r\n markdownResult=\"\"\r\n if cell['cell_type'] == 'code':\r\n markdownResult += '```\\n'\r\n\r\n for line in cell['source']: \r\n markdownResult += line\r\n\r\n if cell['cell_type'] == 'code':\r\n markdownResult += '\\n```'\r\n \r\n debugPrint(markdownResult)\r\n markdownResult += '\\n\\n'\r\n \r\n return markdownResult", "def make_toml_row(\n txt_row: Mapping[str, Any],\n colgroups: Iterable[ColumnGroupRule],\n columns_with_colgroups: Iterable[Union[ColumnGroupRule, str]],\n) -> Dict[str, Any]:\n # Black magic to avoid creating more than one dictionary per row\n toml_row = {}\n\n # Fill the row dict with colgroups, as well as columns that have value.\n for name_or_colgroup in columns_with_colgroups:\n if isinstance(name_or_colgroup, ColumnGroupRule):\n # Column groups are pre-assigned in their correct positions\n # Rely on preservation of insertion order of dicts Python 3.6+\n toml_row[name_or_colgroup.alias] = None\n else:\n value = txt_row[name_or_colgroup]\n if not (value is None or value == \"\"):\n toml_row[name_or_colgroup] = encode_toml_value(name_or_colgroup, value)\n\n # Replace member columns if they can be packed into column groups\n for column_group in colgroups:\n packed_values = pack_colgroup(column_group.schema, toml_row)\n if packed_values:\n toml_row[column_group.alias] = packed_values\n for name in column_group.member_names():\n toml_row.pop(name, None)\n else:\n del toml_row[column_group.alias]\n\n return toml_row", "def transform(self):\n with open(self.csv_path, \"r\") as f:\n csv_entries = [{k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True)]\n\n nested_fields = get_nested_fieldnames(csv_entries[0])\n # values of these fields should be transformed to a list\n # list_fields = set()\n # for entry in csv_entries:\n # for k, v in entry.items():\n # if '||' in v:\n # list_fields.add(k)\n list_fields = {\n \"BITSTREAM Download URL\",\n \"BITSTREAM License\",\n \"BITSTREAM Webshop URL\",\n \"dc.contributor\",\n \"dc.contributor.author\",\n \"dc.contributor.editor\",\n \"dc.date.available\",\n \"dc.date.accessioned\",\n \"dc.date.issued\",\n \"dc.date.submitted\",\n \"dc.dateSubmitted\",\n \"dc.description.abstract\",\n \"dc.description.provenance\",\n \"dc.grantproject\",\n \"dc.identifier\",\n \"dc.identifier.pr\",\n \"dc.language\",\n \"dc.notes\",\n \"dc.number\",\n \"dc.redirect\",\n \"dc.relation.ispartofseries\",\n 
\"dc.relationisFundedBy\",\n \"dc.subject\",\n \"dc.subject.classification\",\n \"dc.subject.other\",\n \"dc.title\",\n \"dc.title.alternative\",\n \"dc.type\",\n \"oapen.collection\",\n \"oapen.grant.number\",\n \"oapen.grant.program\",\n \"oapen.imprint\",\n \"oapen.relation.hasChapter\",\n \"oapen.relation.hasChapter_dc.title\",\n \"oapen.relation.isFundedBy\",\n \"oapen.relation.isFundedBy_grantor.name\",\n \"oapen.relation.isPartOfBook\",\n \"oapen.relation.isPartOfBook_dc.title\",\n \"oapen.relation.isPublishedBy_publisher.name\",\n \"oapen.relation.isPublisherOf\",\n \"oapen.relation.isbn\",\n \"oapen.remark.public\",\n \"peerreview.anonymity\",\n \"peerreview.id\",\n \"peerreview.open.review\",\n \"peerreview.publish.responsibility\",\n \"peerreview.review.decision\",\n \"peerreview.review.stage\",\n \"peerreview.review.type\",\n \"peerreview.reviewer.type\",\n }\n # add custom 'dc.subject.classification_code'\n list_fields.add(\"dc.subject.classification_code\")\n entries = transform_dict(csv_entries, convert, nested_fields, list_fields)\n\n # Transform release into JSON Lines format saving in memory buffer\n # Save in memory buffer to gzipped file\n list_to_jsonl_gz(self.transform_path, entries)" ]
[ "0.65076196", "0.6053189", "0.5902629", "0.57102025", "0.57087696", "0.5598717", "0.5575147", "0.55525213", "0.55045193", "0.54885995", "0.5486372", "0.53834856", "0.53592014", "0.533789", "0.52786314", "0.5270643", "0.52492356", "0.5215063", "0.5192689", "0.5192689", "0.5163411", "0.5161917", "0.5158407", "0.5152792", "0.51454127", "0.5143391", "0.5118127", "0.51077646", "0.5090447", "0.5063841" ]
0.73029524
0
Writes the processed RDD to output file
def writeToFile(rdd, parallelized, output, user, format, features): fileEnd = "." + format output_path = output + "/ig/" + user + "/" + user + fileEnd if parallelized: rdd.saveAsTextFile(output_path) else: arr = np.array(rdd.collect()) if not os.path.exists(os.path.dirname(output_path)): os.makedirs(os.path.dirname(output_path)) with open(output_path, 'w+') as tsvfile: for row in arr: if format == "json": tsvfile.write(row.encode("utf-8", errors='ignore') + "\n") else: tsvfile.write(row + "\n") if not format == "json": output_path = output + "/ig/" + user + "/" + user + ".txt" saveCorpusFile(output_path, arr, format, features)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def beginFileOutput(self):\n self._outputFilepath = self.dataSet[self._outputFileLabel]\n self._outputFile = open(self._outputFilepath, 'w')", "def writeto(self, fileout):\n \n dump_pkl(self.data, fileout)", "def __export_file(self, filename, output):\n outfile = open(filename, \"w\")\n outfile.write(output)\n outfile.close\n print(\"Output written to file: \" + filename + \"\\n\")", "def write_to(self, filename):\n with open(filename, 'w') as f:\n for xx, yy, zz, ww in zip(self.x, self.y, self.field, self.weight):\n f.write(\"%s %s %s %s\\n\" % (xx, yy, zz, ww))\n logger.info(\"Written data into file {0}\".format(filename))", "def dump(self, mode, glue=' ', path=None, codec=None, filename=None):\n if self._coord_format != constants.MatrixCoordinateDefault:\n self._logger.error(\"invalid coordinate format\")\n raise NotImplementedError(\"invalid coordinate format\")\n\n rdd = self.clear().data.map(\n lambda m: glue.join((str(m[0]), str(m[1]), str(m[2])))\n )\n\n if mode == constants.DumpingModeUniqueFile:\n data = rdd.collect()\n\n with open(filename, 'a') as f:\n for d in data:\n f.write(d + \"\\n\")\n elif mode == constants.DumpingModePartFiles:\n rdd.saveAsTextFile(path, codec)\n else:\n self._logger.error(\"invalid dumping mode\")\n raise ValueError(\"invalid dumping mode\")", "def writeOutput(self):\n\n self.collect.writeOutput()", "def write_output(basis, filename):\n\n logging.info('Writing output to {}'.format(filename))\n\n basis.to_csv(filename)", "def writeOutput(self, output):", "def write_file(self):\n\n running_time = str(self.running_time_end - self.running_time_start)\n rounded_running_time = '{:.10}'.format(running_time)\n output = 'path_to_goal: ' + str(self.path_to_goal) + '\\n'\n output += 'cost_of_path: ' + str(self.cost_of_path) + '\\n'\n output += 'nodes_expanded: ' + str(self.nodes_expanded) + '\\n'\n output += 'fringe_size: ' + str(self.fringe_size) + '\\n'\n output += 'max_fringe_size: ' + str(self.max_fringe_size) + '\\n'\n output += 'search_depth: ' + str(self.search_depth) + '\\n'\n output += 'max_search_depth: ' + str(self.max_search_depth) + '\\n'\n output += 'running_time: ' + rounded_running_time + '\\n'\n\n system_name = system()\n if system_name == 'Windows':\n output += 'max_ram_usage: (Not available on Windows OS)'\n elif system_name == 'Linux':\n output += 'max_ram_usage: ' + \\\n str(getrusage(RUSAGE_SELF).ru_maxrss / 1024) + '\\n'\n\n file = open('output.txt', 'w+')\n file.write(output)\n print(output)", "def write(self, outfilename):\n\n nx.write_gpickle(self.graph, outfilename)", "def write_output_shifts_to_file(self, shift_output):\n pass", "def save_elem_file(self, output):\n with open(output, 'wb') as fid:\n self._write_elem_header(fid)\n self._write_nodes(fid)\n self._write_elements(fid)\n self._write_neighbors(fid)", "def create_output_files(self):\n namenode = self.runner.namenode\n for i in range(self.cnt_reducers):\n fname = '%s.%s' % (self.output_dir, reduce_output(self.id, i))\n namenode.create_file(fname)\n self.result_files.append(fname)\n self.open_files.append(fname)\n\n for j in range(self.cnt_mappers):\n fname = map_output(self.id, j, i)\n namenode.create_file(fname)\n self.open_files.append(fname)", "def visited_nodes_to_file(self):\r\n # Create and write file only if we have something to write\r\n if len(self.visited_node) > 0:\r\n with open('{}'.format(self.path), mode='w') as f:\r\n # Writing line by line to the file\r\n for node, val in self.visited_node:\r\n f.write('{} {}\\n'.format(self.convert_matrix_rastor(node), 
val))", "def write_to_file(self, filename: str) -> None:", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write(self, outputFile):\n \n try: \n f = open(outputFile + '.py', 'w')\n for trail in self.trails: \n f.write(\"[\")\n for index in trail:\n f.write(\"({0}, {1}), \".format(*index)) \n f.write(\"]\\n\")\n \n except IOError, e:\n msg = \"Exception encountered when attempting \" + \\\n \"to write data to file: {0}.\" + \\\n \"\\n\\t -- Exception was: {1}\" + \\\n \"\\n\\t For help use --help\".format(outputFile, e)\n raise Usage(e)", "def save_output(self, output_file_path):\r\n self.output_file.save(output_file_path)", "def write_output(series, filename):\n\n logging.info('Writing output')\n\n df = series.reset_index()\n\n df.columns = ['subject_id', 'classification']\n\n df.to_csv(filename, index=False)", "def write_output(self):", "def _toFile(self):\n pass", "def write_output_file(self, index):\n ctx = self.block_store.make_local_output(self.expected_outputs[index])\n self.open_output_contexts[index] = ctx\n return ctx.get_filename()", "def write_model(clf, filename):\n joblib.dump(clf, filename)", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def export(fileName, result):\n with open(fileName, 'a') as output:\n output.write(result)", "def to_file(self, fn):\n store.store_dict(fn, 'trainalgorithm', self.to_dict())", "def write_output(output_dir, df_out):\n # Make stage output dir\n output_dir = os.path.join(output_dir, 'transform')\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n # Write dfs to files\n for model_cls_name, df in df_out.items():\n fp = os.path.join(output_dir, model_cls_name + '.tsv')\n df.to_csv(fp, sep='\\t')", "def _write_keypoint_results(keypoint_results, gt_folder, pred_folder):", "def write(self, out):" ]
[ "0.63989294", "0.63781404", "0.63742775", "0.63496083", "0.63116854", "0.627859", "0.627706", "0.6234401", "0.6204154", "0.62020314", "0.61875045", "0.6172294", "0.61649185", "0.6143975", "0.61350286", "0.60889685", "0.60889685", "0.6072765", "0.60644495", "0.6057928", "0.60351914", "0.6028792", "0.6028296", "0.60254455", "0.60125214", "0.59891844", "0.5986639", "0.5975856", "0.5967381", "0.5962317" ]
0.79359454
0
Cleans the output directory to make room for new outputs
def cleanOutputDir(output): if os.path.exists(output) and os.path.isdir(output): shutil.rmtree(output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def devclean():\n click.echo(\"start clean your output folder...\")\n rm(OUTPUTDIR, recursive=True)", "def clear_local_output_directory():\n output_path = '../output/*'\n files = glob.glob(output_path)\n for single_file in files:\n os.remove(single_file)", "def clean_folder(self):\n # Remove the 1st output\n # Remove the 2nd output\n # Remove the calibrated output\n try:\n os.remove(\"output1.csv\")\n except:\n pass\n try: \n os.remove(\"output2.csv\")\n except:\n pass\n try:\n os.remove(self.__add_output_file_location(self._output_filename))\n except:\n pass\n \n list = os.listdir(\"edited\")\n for file in list:\n file = os.path.join(\"edited\", file)\n try:\n os.remove(file)\n except:\n pass\n \n list = os.listdir(\"extracted\")\n for file in list:\n file = os.path.join(\"extracted\", file)\n try:\n os.remove(file)\n except:\n pass", "def clearOutputDirectory(self):\n for file in os.listdir(self.config[\"outputPath\"]):\n self.logger.info(\"Deleting old output file: {0}\".format(file))\n os.remove(os.path.join(self.config[\"outputPath\"], file))", "def clean_dirs(output_dir):\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n os.makedirs(output_dir)", "def clean(self) -> None:\n if self.out_dir.exists():\n shutil.rmtree(self.out_dir)", "def clean_outputs(remit, sourcelist):\n if not os.path.exists('output-'+remit):\n os.mkdir('output-'+remit)\n for source in sourcelist:\n os.chdir('output-'+remit)\n if os.path.exists(source):\n shutil.rmtree(source)\n print('* deleted old \"output-%s/%s\"' % (remit, source))\n os.mkdir(source)\n # os.chdir(source)\n # os.mkdir('debug')\n # os.chdir('..')\n os.chdir('..')", "def clean(self):\n original_dir = os.getcwd()\n os.chdir(self.output)\n\n # Clear out directory\n file_list = os.listdir(self.output)\n\n for afile in file_list:\n if not afile.endswith('.gitignore'):\n path = os.path.join(self.output, afile)\n if os.path.isdir(path):\n rmtree(path)\n else:\n os.remove(path)\n os.chdir(original_dir)", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def clean(path=None):\n conf.load(path)\n logger.info('cleaning output...')\n helpers.rmdir(conf.get('build_path'))\n logger.info('done')", "def clean(args):\n log = 'removing tmp dir %s ' % (args.tmpdir)\n if args.tmpdir.endswith('STAR'):\n cmd = ['rm -rf %s' % (args.tmpdir)]\n run_subprocess(cmd,args,log)\n log = \"remove tmp files from output dir\"\n cmd = ['mv %s/crick_joinedLog.final.out %s/Crick_joinedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['mv %s/watson_joinedLog.final.out %s/Watson_joinedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['mv %s/crick_mergedLog.final.out %s/Crick_mergedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['mv %s/watson_mergedLog.final.out %s/Watson_mergedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['rm -rf %s/crick_*' % args.output_dir]\n run_subprocess(cmd, args, log)\n cmd = ['rm -rf %s/watson_*' % args.output_dir]\n run_subprocess(cmd, args, log)\n cmd = ['rm -rf %s/joined* header.sam' % args.output_dir]\n run_subprocess(cmd, args, log)", "def clear(self):\r\n shutil.rmtree(self._output_dir, ignore_errors=True)", "def clean_outputs(self) -> None:\n\n def _delete_if_not_none(fn: Optional[str]) -> None:\n if fn is not None:\n 
Path(fn).unlink()\n\n _delete_if_not_none(self.config[\"LOG_FILE\"])\n\n for file_ in self.exporter.get_all_files():\n file_.unlink()", "def setup_outdir():\n try:\n shutil.rmtree(OUTDIR)\n except FileNotFoundError:\n pass\n os.makedirs(OUTDIR, exist_ok=True)", "def clean_output_folder(output_folder):\n for root, dirs, files in os.walk(output_folder):\n for f in files:\n os.unlink(os.path.join(root, f))\n for d in dirs:\n shutil.rmtree(os.path.join(root, d))", "def cleanup_output(output_name):\n print(\"Removing {}\".format(output_name))\n os.remove(output_name)", "def clean(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n if path.exists(self.binary_name):\n os.unlink(self.binary_name)\n if path.exists(actual_output_file):\n os.unlink(actual_output_file)", "def cleanup(self):\n\n print \"Cleaning up...\",\n sys.stdout.flush()\n\n builddir = os.path.join(self.build)\n\n comm = 'rm -rf '+builddir\n #+' '+libdir+' '+logdir\n (output, error, retz) = runShellCommand(comm)\n\n print \"done.\"", "def cleanUpTemporaryFiles(options):\n os.system(\"rm \"+options.output_directory_per_run+\"/*.abundance\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*.phasing_score\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*regionsOfInterest*\")\n os.system(\"mv \"+options.output_directory_per_run+\"/* \"+options.output_directory_per_run+\"/../\")\n os.system(\"rm -rf \"+options.output_directory_per_run)", "def flush_outputs():\n try:\n shutil.rmtree(ROOT_OUTPUT_DIR)\n print(\"Removed directory '{}'!\".format(ROOT_OUTPUT_DIR))\n return True\n except FileNotFoundError:\n print(\"Directory '{}' already removed!\".format(ROOT_OUTPUT_DIR))\n return False", "def clean_cwd():\n\n # Generator of the files generated for each runs\n del_files = (file for file in os.listdir() if file.endswith('.vtk')\n or file.endswith('.dat')\n or file.startswith('eeldata')\n or file.endswith('.log'))\n\n for file in del_files:\n try:\n os.remove(file)\n print(\"\\rRemoved {:s} succesfully!\".format(file), end=' '*15)\n except:\n print(\"\\rFailed to remove {:s}\".format(file))\n raise\n\n print('')", "def remove_output(path: str) -> None:\n try:\n Stat.remove(path)\n global remove_empty_directories # pylint: disable=invalid-name\n while remove_empty_directories.value:\n path = os.path.dirname(path)\n Stat.rmdir(path)\n Logger.file(f\"Remove the empty directory: {path}\")\n except OSError:\n pass", "def cleanup(self):\n self.__log('Resetting value for output_filename, making way for another go.')\n self.output_filename = None", "def clean_directory():\n if os.path.exists('data'):\n shutil.rmtree('data')\n os.makedirs('data')\n\n if os.path.exists('returns'):\n shutil.rmtree('returns')\n os.makedirs('returns')", "def cleanup_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF, DIR_BACK, DIR_TEXT)\n map(lambda dir: shutil.rmtree(os.path.join(cwd, dir)) , dirs)", "def __manage_output_folder(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)", "def clean():\n possible_outputs = (\n '{}.html'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.epub'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.pdf'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.docx'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.odt'.format(CONFIG['FULL_PROJECT_NAME']),\n )\n\n for filename in possible_outputs:\n if os.path.exists(filename):\n os.remove(filename)\n print(\"Removed {}\".format(filename))", "def _FinaliseForTest():\n global outdir\n\n if outdir:\n _RemoveOutputDir()\n 
outdir = None", "def clean(self):\n if self.options.format != 'svg':\n for svgfile in self.svgouts.itervalues():\n os.remove(svgfile)\n os.rmdir(self.tmpdir)", "def clearRunDirectory(self):\n for root, dirs, files in os.walk(self.run_dir, topdown=False):\n for name in files:\n if name.lower().endswith(('.cps', '.txt', '.sbml', '.csv')):\n os.remove(os.path.join(root, name))\n for name in dirs:\n if len(os.listdir(os.path.join(root, name)))==0:\n os.rmdir(os.path.join(root, name))" ]
[ "0.8009365", "0.78965557", "0.77991366", "0.7785498", "0.77021986", "0.75034577", "0.7468919", "0.7352185", "0.7345541", "0.73332447", "0.7230946", "0.7224569", "0.72211844", "0.7177135", "0.7118948", "0.71054673", "0.7068527", "0.70609295", "0.70411116", "0.70172423", "0.6992937", "0.69748104", "0.68810326", "0.68738985", "0.6863476", "0.68294036", "0.6823736", "0.68068725", "0.6802199", "0.66952425" ]
0.81068283
0
Appends corpus files for all users to a single corpus file
def append_corpus(output): files = [] output_path = output + "/ig/" + "ig_corpus.txt" for root, directories, filenames in os.walk(output + "/ig/"): for filename in filenames: files.append(os.path.join(root, filename)) corpusfiles = filter(lambda x: ".txt" in x, files) if not os.path.exists(os.path.dirname(output_path)): os.makedirs(os.path.dirname(output_path)) with open(output_path, "w+") as corpusFile: for file in corpusfiles: fileH = open(file, "r") corpusFile.write(fileH.read())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_corpus_to_file(output_file, corpus): \n \n file = open(output_file, 'w')\n for line in corpus: \n file.write(line)\n print ('Corpus has been writted in file')\n file.close()", "def combine_documents(path=os.path.join(os.curdir, \"data/processed\"), name='corpus.txt'):\n outname=os.path.join(path, name)\n if os.path.exists(outname):\n os.remove(outname)\n filenames = [f for f in os.listdir(path) if fnmatch.fnmatch(f, '*.txt')]\n with open(outname, 'w') as outfile:\n print \"Combining documents...\"\n for fname in filenames:\n print fname\n with open(os.path.join(path, fname)) as infile:\n outfile.write(infile.read())", "def build_corpus(self):\n print(\"Inside the build_corpus >>>>>\")\n documentsCount = 0\n documents = self.documents\n\t\t\n with open(self.documents_path) as file:\n for documents in file.readlines():\n documents = documents.rstrip('}\\n ').strip('0\\t').strip('1\\t').split(' ')\n documentsCount = documentsCount +1\n self.documents.append(documents)\n\t\t\t\n self.number_of_documents = documentsCount", "def corpusWriter(self):\n with open('corpus.txt', 'w') as file:\n for quote in self.quotes:\n file.write(quote + '\\n')", "def build_corpus(username, api):\n print('getting tweets for user: ', username)\n timeline = api.GetUserTimeline(screen_name=username, count=200)\n tweets = [t.text for t in timeline]\n corpus = ' '.join(tweets)\n return corpus", "def save_corpus(events_df, path):\n corpus = extract_corpus(events_df)\n with open(path, 'w') as f:\n for doc in corpus:\n f.write(doc + '\\n')", "def merge():\n result = []\n for f in glob.glob(f\"{DATA_DIR}/COP*.json\"):\n with open(f, \"r\") as infile:\n result.append(json.load(infile))\n\n with open(f\"{DATA_DIR}/corpus.json\", \"w\", encoding=\"utf-8\") as outfile:\n json.dump(result, outfile)", "def create_corpus_for_genre(genre):\n corpus = \"\"\n if genre in os.listdir(DATA_DIR):\n #iterate through artists\n for artist in os.listdir(DATA_DIR + \"/\" + genre + \"/\"):\n for filename in os.listdir(DATA_DIR + \"/\" + genre + \"/\" + artist + \"/\"):\n with open(DATA_DIR + \"/\" + genre + \"/\" + artist + \"/\" + filename) as f:\n corpus += f.read()\n return corpus", "def main():\n sc = pyspark.SparkContext(conf=sparkConf())\n sql = pyspark.SQLContext(sc)\n args = parse_args()\n cleanOutputDir(args.output)\n users = os.listdir(args.input)\n map(lambda user: parseUser(user, args, sql, args.partitions), users)\n corpora_stats(args.output)\n append_corpus(args.output)", "def write_data_corpus(filename, documents):\n\n with open(filename, 'wb') as f:\n for statement in documents:\n enc_statement = statement.encode('utf-8')\n f.write(enc_statement + '\\n')", "def read_corpus(dir):\n corpus = {}\n file_names = glob.glob(f\"{dir}/*\")\n for file_name in file_names:\n name = os.path.splitext(os.path.basename(file_name))[0]\n text = \" \".join(open(file_name, \"rt\").readlines())\n text = text.replace(\"\\n \\n\", \" \")\n text = text.replace(\"\\n\", \"\")\n text = text.replace(\" \", \" \")\n corpus[os.path.splitext(name)[0]] = text\n return corpus", "def save(file, corpus):\n with open(file, 'w') as f_out:\n f_out.write(corpus)", "def update_corpus(sentences):\n \n corNeg = None\n corPos = None\n corNeu = None\n try:\n corNeg = open('corpus\\\\neg.txt', 'ab')\n corPos = open('corpus\\\\pos.txt', 'ab')\n corNeu = open('corpus\\\\neu.txt', 'ab')\n except:\n print(\"Error: Loading Corpus\")\n return\n for sent_d in sentences:\n sent = sent_d[\"sentence_txt\"]\n tagged = sent_d[\"tag_id\"]\n # update corpus\n if tagged 
== tag.neg:\n corNeg.write('\\n'+sent)\n if tagged == tag.pos:\n corPos.write('\\n'+sent)\n if tagged == tag.neu:\n corNeu.write('\\n'+sent)\n corNeg.close()\n corPos.close()\n corNeu.close()", "def get_corpus():\n corpus_raw = []\n files = os.listdir()\n\n for name in files:\n if \".txt\" in name:\n try:\n file = open(name, \"rt\", encoding='utf8')\n data_org = file.read()\n corpus_raw.append(data_org)\n except:\n print(\"ERROR: Couldn't open a .txt file. Please ensure that the text is UTF-8 encoded.\")\n elif \".docx\" in name:\n try:\n data_org = docx2txt.process(name)\n corpus_raw.append(data_org)\n except:\n print(\"ERROR: Couldn't open a .docx file. Please ensure that the text is UTF-8 encoded.\")\n else:\n print(\"ERROR: Cannot print non .txt or .docx files. Please verify the input folder's contents.\")\n\n return corpus_raw", "def process_docs(directory, vocab):\n for filename in listdir(directory):\n if not filename.endswith('.txt'):\n continue\n path = directory + '/' + filename\n add_doc_to_vocab(path, vocab)", "def upload_corpus(self, name, directory, replace=False):\n logging.info('Not uploading corpus because no Filestore.')", "def handle(self, *args, **options):\n self.stdout.write('exporting corpus to text file')\n basetext = '\\n'.join([x.text_str for x in BaseText.objects.all() if x.check_age()])\n with open(os.path.join(BASE_DIR, 'corpus.txt'), 'w') as f:\n f.write(basetext)", "def extract_corpus(corpus_dir = \"articles\"):\n corpus = {}\n num_documents = 0\n for filename in os.listdir(corpus_dir):\n with open(os.path.join(corpus_dir, filename)) as f:\n corpus[filename] = re.sub(\"[^\\w]\", \" \", f.read()).split()\n return corpus", "def process_docs(directory, vocab):\n for file_name in listdir(directory):\n file_path = directory + '/' + file_name\n add_doc_to_vocab(file_path, vocab)", "def build_corpus(self):\n # #############################\n\n doc = metapy.index.Document()\n tok = metapy.analyzers.ICUTokenizer(suppress_tags=True)\n tok = metapy.analyzers.LowercaseFilter(tok)\n tok = metapy.analyzers.LengthFilter(tok, min=3, max=1000)\n tok = metapy.analyzers.Porter2Filter(tok)\n tok = metapy.analyzers.ListFilter(tok, \"lemur-stopwords.txt\", metapy.analyzers.ListFilter.Type.Reject)\n collection = -1\n\n with open(self.documents_path) as file:\n for num, line in enumerate(file):\n l = line.strip()\n c = int(l[0])\n l = l[2:]\n doc.content(l)\n tok.set_content(doc.content())\n if c != collection:\n self.documents.append([])\n collection = c\n self.documents[c].append([token for token in tok])\n self.number_of_collections = len(self.documents)\n self.number_of_documents = len(self.documents[0])\n #print(self.number_of_collections)\n #print(self.number_of_documents)\n #print(self.documents[0])", "def archive_corpus(self):\n total_perf_array = self.single_sequence_corpus()\n if self.verbose:\n print(total_perf_array.shape)\n data_file_name = \"TinyPerformanceCorpus.h5\"\n with h5py.File(data_file_name, 'w') as data_file:\n data_file.create_dataset('total_performances', data=total_perf_array, dtype='float32')", "def create_corpus_list(self, corpus_files):\n freq_dict = defaultdict(dict)\n\n for corpus_file in corpus_files:\n corpus_name = corpus_file.split(\"/\")[-1]\n corpus = Corpus(corpus_name, corpus_file)\n self.add_corpus(corpus)\n\n for word in corpus.word_list:\n lemma = word.lemma\n pos = word.pos\n if freq_dict.has_key((lemma,pos)):\n freq_dict[lemma][pos].add_corpus_name(word.corpus_name)\n freq_dict[lemma][pos].add_corpus_frequency(word.corpus_frequency)\n 
freq_dict[lemma][pos].add_synstets(word.synstets)\n else:\n freq_dict[lemma][pos] = word\n\n self.calculate_relative_probability()\n\n return deepcopy(freq_dict)", "def get_corpus():\n all_text = []\n\n for _, _, files in os.walk(DATA_DIRECTORY):\n for f in files:\n with open(os.path.join(DATA_DIRECTORY, f), 'r') as article:\n # Quotation marks rarely come out as pairs in finished chains.\n # So we remove them before adding the article text:\n all_text.append(re.sub(r'[„“]', '', article.read()))\n\n return markovify.Text(\"\".join(all_text), state_size=2)", "def load_files(directory):\n\n corp = dict()\n\n for name in os.listdir('corpus'):\n\n with open(\"corpus\" + os.sep + name, 'r') as doc :\n corp[name] = doc.read()\n\n return corp", "def load_files(directory):\n corpus_dict = dict()\n filenames = os.listdir(directory)\n for file in filenames:\n path = os.path.join('corpus',file)\n with open(path,encoding='utf-8') as f:\n text = f.read()\n corpus_dict[file] = text\n \n\n return corpus_dict", "def createTrecTextForCurrentDocuments(baseDir):\n pathToFolder = baseDir + 'Collections/'\n if not os.path.exists(pathToFolder):\n os.makedirs(pathToFolder)\n currentTime = str(datetime.datetime.now()).replace(\":\", \"-\").replace(\" \", \"-\").replace(\".\", \"-\")\n pathToTrecText = pathToFolder+\"TrecText/\"\n if not os.path.exists(pathToTrecText):\n os.makedirs(pathToTrecText)\n filename = pathToTrecText + currentTime\n client = MongoClient('asr2.iem.technion.ac.il',27017)\n db = client.asr16\n documents = db.documents.find({}).sort('query_id',1)\n queryToDocnos= {}\n current_users = retrieve_users()\n f = open(filename, 'w')\n for document in documents:\n if document['username'] in current_users:\n print(document['query_id'], document['username'])\n f.write('<DOC>\\n')\n docno = str(document['query_id']).zfill(3) + '-' + str(document['username'])\n f.write('<DOCNO>' + docno + '</DOCNO>\\n')\n docnos = queryToDocnos.get(str(document['query_id']).zfill(3), [])\n docnos.append(docno)\n queryToDocnos[str(document['query_id']).zfill(3)] = docnos\n f.write('<TEXT>\\n')\n f.write(unicodedata.normalize('NFKD', document['current_document']).encode('cp1252', \"ignore\").decode('utf-8', 'replace').replace(u'\\uFFFD', ' ').rstrip())\n f.write('\\n</TEXT>\\n')\n f.write('</DOC>\\n')\n f.close()\n pathToWorkingSet = pathToFolder+ 'WorkingSets/'\n if not os.path.exists(pathToWorkingSet):\n os.makedirs(pathToWorkingSet)\n workingSetFilename = pathToWorkingSet + currentTime\n f = open(workingSetFilename, 'w')\n for query, docnos in queryToDocnos.items():\n i = 1\n for docid in docnos:\n f.write(query.zfill(3) + ' Q0 ' + docid + ' ' + str(i) + ' -' + str(i) + ' indri\\n')\n i +=1\n f.close()\n return filename, workingSetFilename, currentTime", "def download_texts(author):\n corpus = SqliteCorpus(GutenbergEbooks(), tempfile.mkdtemp())\n\n text_generator = corpus.texts_for_author(author)\n filename = lambda text_info: re.sub(r'\\W', '-', text_info.title) + '.txt'\n\n for text_info, fulltext in text_generator:\n with open(filename(text_info), 'w') as outfile:\n outfile.write(fulltext)\n print('downloaded %s' % outfile.name, file=sys.stderr)\n\n shutil.rmtree(corpus.basedir)", "def build_corpus_gensim():\r\n\tif load_corpus_gensim():\r\n\t\treturn\r\n\t\r\n\tglobal gensim_corpus, common_corpus_list\r\n\r\n\tprint('\\nbuilding gensim corpus')\r\n\r\n\tgensim_corpus = [ gensim_dictionary.doc2bow(v[1].lower().split()) for v in common_corpus_list 
]\r\n\tgensim.corpora.MmCorpus.serialize(paths.path_data_mmcorpus,gensim_corpus)\r\n\t# print(gensim_corpus)\r", "def create_corpus(source):\n\treturn \" \".join([file.read() for file in source])", "def load_files(directory):\n # first load the files from the corpus directory into memory\n corpus = dict()\n # Return a list specifiying a directory given by 'path'.\n for filename in os.listdir(directory):\n file_p = os.path.join(directory, filename)\n if os.path.isfile(file_p) and filename.endswith(\".txt\"):\n # os.path.join(path, *path) -- concatenation of path and *paths with exactly one directory separator (os.sep)\n with open(file_p, \"r\", encoding='utf8') as file:\n corpus[filename] = file.read()\n return corpus" ]
[ "0.64785516", "0.6462951", "0.634865", "0.6262649", "0.611011", "0.60758895", "0.60436034", "0.60298586", "0.59843755", "0.5975818", "0.59066856", "0.5899971", "0.5875928", "0.58434993", "0.58300185", "0.5774058", "0.57641536", "0.5749405", "0.5739908", "0.56826246", "0.56734616", "0.5668254", "0.56615275", "0.56515", "0.5650204", "0.5634461", "0.5633072", "0.5627442", "0.5609527", "0.5598986" ]
0.7422377
0
Computes some basic statistics about the output corpus
def corpora_stats(output): igFiles = [] for root, directories, filenames in os.walk(output + "/ig/"): for filename in filenames: igFiles.append(os.path.join(root, filename)) igFiles = filter(lambda x: ".txt" in x, igFiles) words = [] for file in igFiles: fileH = open(file, "r") words = words + fileH.read().split(" ") print("Number of words in IG corpus: {}".format(len(words))) print("Vocabulary size of IG corpus: {}".format(len(set(words))))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def corpus_statistics(corpus, d_corp):\n print('There are {} types of a total of {} tokens in the corpus.\\n' .format(number_types(corpus), corpus_length(corpus)))\n print('There average token length is {}.\\n' .format(average_length(corpus)))\n print('The longest token is {}.\\n' .format(longest_token(corpus)))\n print('The number of hapaxes is {} and represents the {} of the corpus.\\n.' .format(hapaxes(corpus), percentage(hapaxes(corpus), corpus_length(corpus))))\n print('The 10 most frequent types of the total tokens are {} and represent the {}%.\\n' .format(most_frequent(corpus), percentage_common_types(corpus))) \n print('The hapaxes present in each of the 9 partitions are {}.\\n' .format(hapaxes_parts(d_corp)))\n print('The percentage of hapaxes for each partition is {}.\\n' .format(percentage_hapaxes(d_corp, corpus)))\n plots(d_corp, corpus)\n print('\\nIn the tupla {}, the first element is the number of unique bigrams, and the second element is the percentage of unique bigrams from all the bigrams in the corpus. Similarly, in this tupla {}, the first element is the number of unique trigrams, and the second element is the percentage of unique trigrams from all the bigrams in the corpus.' .format(ngram(corpus, 2), ngram(corpus, 3)))", "def summarize_corpus():\n\t\n\t# get metadata\n\t#get_metadata.from_TEIP5(wdir, corpus_inpath, \"metadata\", md_mode)\n\t\n\t# visualize some metadata\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"author-continent\")\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"author-country\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"language\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_hist\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_x\")\n\tvisualize_metadata.plot_pie(wdir, md_csv, \"subgenre\")\n\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"subgenre\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"gender\")\n\t\n\t# make some counts\n\tmd_table = pd.DataFrame.from_csv(os.path.join(wdir, md_csv), header=0)\n\tnum_texts = len(md_table)\n\t#num_language = len(md_table.groupby([\"language\"]))\n\t#num_continent = len(md_table.groupby([\"author-continent\"]))\n\t#num_countries = len(md_table.groupby([\"author-country\"]))\n\t#num_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_subgenre = len(md_table.groupby([\"subgenre\"]))\n\t#num_subgenre_x = len(md_table.groupby([\"subgenre_x\"]))\n\t#fr_subgenre_hist = md_table.groupby([\"subgenre_hist\"]).count()\n\t#num_historical = fr_subgenre_hist[\"idno\"][\"historical\"]\n\t#num_not_historical = fr_subgenre_hist[\"idno\"][\"not_historical\"]\n\t\n\t\n\td = {\"texts\":[num_texts], \n\t#\"languages\":[num_language],\n\t#\"continents\":[num_continent],\n\t#\"countries\":[num_countries],\n\t\"authors\":[num_authors],\n\t#\"subgenre_x\":[num_subgenre_x],\n\t\"subgenre\":[num_subgenre]}\n\t#\"num_historical\":[num_historical],\n\t#\"num_not_historical\":[num_not_historical]}\n\t\n\t\n\t\n\tcount_fr = pd.DataFrame(d)\n\tcount_fr.to_csv(os.path.join(wdir, \"corpus-description.csv\"), sep=\",\", header=True)\n\tprint(\"Done: summarize corpus\")", "def corpus_stats(self):\n print(\"Number of sentences: {}\".format(len(self.corpus.sents())))\n print(\"Token: {}\".format(len(self.corpus.words())))\n types = FreqDist(self.corpus.words())\n print(\"Types: {}\".format(len(types)))", "def corpusStats(self, *args, **kwargs):\n return KorAPClient.corpusStats(self, *args, 
**kwargs)", "def compute_statistics(self):", "def measureAll(authors_texts,sectorialized_agents):\n authors_texts=P.text.aux.textFromAuthors(authors_texts,self.topm_dict[\"sectorialized_agents\"])\n authors_measures={}\n # análise de cada mensagem e de cada autor\n for author in authors_texts:\n authors_measures[author]={}\n texts=authors_texts[author]\n authors_measures[author][\"raw_strings\"]=P.text.raw.analyseAll(texts)\n authors_measures[author][\"pos\"]= P.text.pos.analyseAll(authors_analysis[author][\"raw_analysis\"])\n authors_measures[author][ \"wordnet\" ]=P.text.wordnet.analyseAll(authors_analysis[author][\"pos_analysis\"])\n authors_measures[author][\"tfIdf\"]=P.text.tfIdf.analyseAll(texts) # tfIdf de cada texto e do autor, numeric: mean e std das distancias\n # análise de cada setor e da estrutura toda\n# sectors_texts=P.text.aux.textFromSectors(authors_text,sectorialized_agents)\n sectors_measures={}\n for sector in sectorialized_agents:\n sectors_measures[sector][\"raw_strings\"]=P.text.raw.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n sectors_measures[sector][\"pos\"]= P.text.pos.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n sectors_measures[sector][\"wordnet\"]= P.text.wordnet.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n # tfIdf de cada texto e de cada autor, numeric: mean e std das distancias por texto e por autor, e media e etd dos autores\n sectors_measures[sector][\"tfIdf\"]= P.text.tfIdf.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n\n# texts=[sectors_texts[i] for i in (\"peripherals\",\"intermediaries\",\"hubs\")]\n# sectors_analysis[\"raw_strings\"]=P.text.raw.analyseAll(texts)\n# sectors_analysis[\"pos\"]= P.text.pos.analyseAll(sectors_analysis[\"raw_analysis\"])\n# sectors_analysis[ \"wordnet\" ]=P.text.wordnet.analyseAll(sectors_analysis[\"pos_analysis\"])\n# sectors_analysis[\"tfIdf\"]=P.text.tfIdf.tfIdf(texts)\n\n overall_measures[\"raw_strings\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n overall_measures[\"pos\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n overall_measures[\"wordnet\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n # tfIdf measurespor texto, autor e setor, numeric: media e desvio das distancias por cada grupo, media e desvio dos setores e dos autores\n overall_measures[\"tfIdf\"]=P.text.tfIdf.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n\n del authors_texts,sectorialized_agents,author, sector\n return locals()", "def process_corpus(args):\n\n fs = open(args.input,'r')\n out = list()\n for line in fs:\n blob = TextBlob(line.strip())\n result_info = dict()\n result_info\n result_info['correct'] = str(blob.correct())\n if args.parse :\n result_info['parse'] = get_parsed_text(blob)\n if args.tokenize:\n result_info['tokenize'] = get_tokenizer_result(blob)\n if args.sentiment:\n result_info['sentiment'] = analyze_sentiment(blob)\n if args.sentence_sentiment:\n result_info['sentence_sentiment'] = analyze_sentence_sentiment(blob)\n if args.noun_phrase:\n result_info['noun_phrase'] = get_noun_phrases(blob)\n if args.pos:\n result_info['pos'] = get_pos_tags(blob)\n\n out.append(result_info)\n print out\n json.dump(out,open('out.json','w'))\n fs.close()\n print '******************************* Execution completed 
*********************************'", "def __init__(self, corpus):\n self.total = 0\n self.reverseBigramCount = defaultdict(lambda : defaultdict(lambda : 0))\n self.bigramCount = defaultdict(lambda : defaultdict(lambda : 0))\n self.unigramCount = defaultdict(lambda: 0)\n self.train(corpus)", "def compute_document_statistics(documents, doc_lengths, path_relevance_judgements):\n \n print(\"\\n------------- Computing document statistics -------------\")\n print(f\"{len(doc_lengths)/len(documents) * 100}% of stored documents are unique.\")\n \n print(\"\\nThe following should be taken into account with the coming calculations:\")\n print(\" - Only terms that were not remove by the cleaning/stemming/... process were included.\")\n print(\" - If a term occurs multiple times in a file or across files, it is (of course) counted multiple times as well.\")\n \n total_nr_of_terms = 0\n for key in doc_lengths.keys():\n total_nr_of_terms += doc_lengths[key]\n print(f\"\\nThere are {total_nr_of_terms} terms in total accross all documents.\")\n print(f\"The average document length is {total_nr_of_terms/len(doc_lengths)}\")\n \n # Rest of the code is to test for the presence of relevant documents\n print(\"\\n-------- Testing for the presence of relevant documents --------\")\n processed_docs = set()\n for doc in documents:\n processed_docs.add(doc.cord_uid)\n \n crj_docs = set()\n # Load the stored documents\n with open(path_relevance_judgements, 'r') as f:\n for line in f:\n crj = line.split(\" \")\n cord_uid = crj[2]\n crj_docs.add(cord_uid)\n \n only_processed = processed_docs - crj_docs\n only_crj = crj_docs - processed_docs\n symmetric_difference = processed_docs.symmetric_difference(crj_docs)\n intersection = processed_docs.intersection(crj_docs)\n print(f\"There are {len(processed_docs)} unique processed documents.\")\n print(f\"There are {len(crj_docs)} unique documents in the relevance judgements.\")\n print(f\"There are {len(only_processed)} documents that were processed, but are not present in the relevance judgements.\")\n print(f\"There are {len(only_crj)} documents that are present in the relevance judgements, but were not processed.\")\n print(f\"There are {len(symmetric_difference)} documents that were both processed and are present in the relevance judgements.\")\n print(f\"There are {len(intersection)} documents that were either processed or were present in the relevance judgements, but not both.\")", "def __init__(self, corpus):\n self.unigrams = defaultdict(int)\n self.f1 = defaultdict(float)\n self.total = 0\n self.train(corpus)", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.bigramCounts = collections.defaultdict(lambda: 0)\n self.total = 0\n self.train(corpus)", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.bigramCounts = collections.defaultdict(lambda: 0)\n self.trigramCounts = collections.defaultdict(lambda: 0)\n self.followingWords = collections.defaultdict(lambda: set())\n self.precedingWords = collections.defaultdict(lambda: set())\n self.total = 0\n self.discount = 0.75\n self.train(corpus)", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.totalCount = 0\n self.zeroCount = 0\n self.train(corpus)", "def train(self, corpus):\n for sentence in corpus.corpus:\n cleanSentence = sentence.cleanSentence()\n for datum in cleanSentence.data:\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n self.total += 1\n\n i = 
0\n while i < len(sentence.data) - 1:\n token = str(cleanSentence.get(i))\n self.followingWords[token].add(str(cleanSentence.get(i+1)))\n i += 1\n\n i = 1\n while i < len(sentence.data):\n bigram = str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.bigramCounts[bigram] = self.bigramCounts[bigram] + 1\n\n self.precedingWords[str(cleanSentence.get(i))].add(str(cleanSentence.get(i-1)))\n i += 1\n self.precedingWordsTotal = sum(map(lambda x: len(x), self.precedingWords.values()))\n\n i = 2\n while i < len(sentence.data):\n trigram = str(cleanSentence.get(i-2)) + \" \" + str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.trigramCounts[trigram] = self.trigramCounts[trigram] + 1\n i += 1\n\n #print('precedingWords')\n #print(self.precedingWords)\n #print('followingWords')\n #print(self.followingWords)\n #print('unigrams')\n #print(self.unigramCounts)\n #print('bigrams')\n #print(self.bigramCounts)\n\n #self.discount(self.trigramCounts)\n #self.discount(self.bigramCounts)\n #self.discount(self.unigramCounts)", "def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1", "def __init__(self, corpus):\n self.ntokens = 0\n self.counts = collections.defaultdict(lambda: 0)\n self.s = collections.defaultdict(lambda: 0.0)\n self.train(corpus)", "def run_check(output_file,\n documents,\n queries,\n results,\n ext=\".xhtml\",\n formula_bags=False,\n keep_words=True,\n keep_math=True\n ):\n with open(output_file, \"w+\") as out:\n analyzer = Analyzer(formula_bags=formula_bags,\n keep_words=keep_words,\n keep_math=keep_math)\n queries = Queries(queries).get_queries()\n results = Results(results)\n print(\"{},{},{},{},{},{},{},{},{},{},{}\".format(\"Query\",\n \"Document\",\n \"Doc-Length\",\n \"Ranking\",\n \"Span\",\n \"Min-Span\",\n \"Normalized-Span\",\n \"Normalized-Min-Span\",\n \"Min-Distance\",\n \"Ave-Distance\",\n \"Max-Distance\"),\n file=out)\n undefined_docs = []\n for q in tqdm(range(0, len(queries))):\n query = queries[q]\n for doc in results.documents_for_query(query):\n try:\n document = Document(os.path.join(documents, doc + ext))\n (tf_dic, __) = document.lookup_dictionaries(analyzer)\n relevant = lookup_relevant(results.find_score(query, doc))\n try:\n dist = calculate_distances(query, tf_dic)\n doc_length = sum([len(tf_dic[key])\n for key in tf_dic.keys()])\n print(\"{},{},{},{},{}\".format(query,\n document,\n doc_length,\n relevant,\n \",\".join([str(d)\n for d in dist])),\n file=out)\n except DistancesUndefinedException:\n undefined_docs.append((document, relevant, query))\n except FileNotFoundError:\n print(\"Error in opening document: {}\".format(doc))\n print(\"Documents with undefined Distances\")\n for doc in undefined_docs:\n print(\"{}:{}:{}\".format(doc[2], doc[0], doc[1]))", "def main(url, inputFile, directory, rss, opml, output, verbose, debug, relevanceAlgorithm):\n\n if (len(argv) < 2):\n print(\n \"Usage: python3 ctirt.py [options] [target files]\\n\\n Use --> ctirt.py --help for more details...\"\n )\n exit(1)\n\n if (verbose and url) or (url and debug):\n print(\"URL is mutually exclusive with verbose and debug\")\n exit(1)\n \n \n \n # INITIALIZE DOCUMENTS LIST\n documents = [] # list of document objects\n\n # OPML FILE INPUT\n\n if opml:\n printLogo()\n print(\"\\033[0;34m\" + \"Parsing provided opml file: \" + \"\\033[0m\" + \"\\033[1m\" + opml + \"\\033[0m\")\n\n rssList = parser.parseOpml(opml)\n\n for rss in rssList:\n print(\"Parsing 
RSS feed: \" + \"\\033[1m\" + rss + \"\\033[0m\")\n\n feed = parser.parseRss(rss)\n \n if not verbose:\n # progress bar\n progressBar = IncrementalBar('\\tParsing URLs in RSS feed:', max=len(feed.entries), suffix='%(index)d / %(max)d')\n\n for entry in feed.entries:\n document = Document()\n\n document.path = entry.link\n \n document.name, document.text = parser.parseUrl(document.path)\n \n document.wordCount = parser.countWords(document.text)\n \n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n if not verbose:\n progressBar.next()\n else:\n print(\"Done.\")\n \n print(\"\\n\\t\" + \"\\033[0;32m\" + u'\\u2713' + \" Done parsing RSS feed: \" + \"\\033[0m\" + \"\\033[1m\" + rss + \"\\033[0m\")\n # RSS INPUT\n\n elif rss:\n printLogo()\n print(\"Parsing\", rss)\n\n feed = parser.parseRss(rss)\n if not verbose:\n # progress bar\n progressBar = IncrementalBar('Parsing URLs', max=len(feed.entries), suffix='%(index)d / %(max)d')\n\n for entry in feed.entries:\n document = Document()\n\n document.path = entry.link\n \n document.name, document.text = parser.parseUrl(document.path)\n \n document.wordCount = parser.countWords(document.text)\n \n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n if not verbose:\n progressBar.next()\n else:\n print(\"Done.\")\n \n if not verbose:\n progressBar.finish()\n\n print(\"Done.\")\n \n # URL INPUT\n \n elif url:\n printLogo()\n print(\"Parsing...\")\n\n document = Document()\n\n document.path = url\n \n document.name, document.text = parser.parseUrl(url)\n \n document.wordCount = parser.countWords(document.text)\n \n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n print(\"Done.\")\n\n \n # SINGLE FILE INPUT\n\n elif inputFile:\n printLogo()\n print(\"Parsing...\")\n\n document = Document()\n\n document.name = os.path.splitext(inputFile)[0]\n document.path = inputFile\n\n if inputFile.lower().endswith(\".pdf\"): # PDF Parsing\n document.text = parser.parsePdf(inputFile)\n elif inputFile.lower().endswith(\".html\"): # HTML Parsing\n document.text = parser.parseHtml(inputFile)\n\n document.wordCount = parser.countWords(document.text) # Document word count\n\n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n print(\"Done.\")\n\n\n # DIRECTORY INPUT\n\n elif directory:\n printLogo()\n if not verbose:\n # progress bar\n progressBar = IncrementalBar('Parsing', max=len(\n os.listdir(directory)), suffix='%(index)d / %(max)d')\n\n # Loop through files in directory\n for inputFile in os.scandir(directory):\n beginningTime = time.time()\n\n if verbose:\n timeStamp = time.time()\n print(\"***[\" + inputFile.name[0:50] + \"]***\", \"is currently being parsed\",\n \"-->\", (timeStamp - beginningTime), \"seconds have elapsed...\")\n\n document = Document()\n\n document.name = os.path.splitext(inputFile.name)[0]\n document.path = inputFile.path\n\n if verbose:\n print(inputFile.name)\n\n if inputFile.name.lower().endswith(\".pdf\"): # PDF Parsing\n document.text = parser.parsePdf(inputFile.path)\n elif inputFile.name.lower().endswith(\".html\"): # HTML Parsing\n document.text = parser.parseHtml(inputFile.path)\n\n document.wordCount = parser.countWords(\n document.text) # Document word count\n\n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n if not verbose:\n progressBar.next()\n else:\n print(\"Done.\")\n \n if not verbose:\n 
progressBar.finish()\n\n\n # BASIC RELEVANCE CALCULATION\n\n for document in documents:\n document.relevance = relevance.computeBasicRelevance(document.text)\n\n\n # TF-IDF RELEVANCE CALCULATION\n\n if directory and (verbose or debug or relevanceAlgorithm == \"tfidf\"):\n dirWordCount = parser.countDirectoryWords(documents)\n\n wordList = {}\n with open('./assets/wordlist.json') as f:\n jsonWordList = load(f)\n for pair in jsonWordList.items():\n wordList[pair[0]] = float(pair[1])\n\n for document in documents:\n # TODO Figure out how to run - fix arguments (ex. import wordlist), make debug work better by allowing it to work not in verbose\n idfs = relevance.computeIDF(documents, dirWordCount)\n print(\"**************** IDFS ****************\")\n print(idfs)\n tf = relevance.computeTF(wordList, document.wordCount)\n print(\"**************** TF DICT ****************\")\n print(tf)\n\n tfidf = relevance.computeTFIDF(tf, idfs)\n print(\"**************** TF-IDF Values ****************\")\n print(tfidf)\n\n relevanceScore = 0\n\n for word, val in tfidf.items():\n relevanceScore += val\n \n document.tfidf = relevanceScore * 100\n\n\n # OUTPUT SECTION\n\n documents.sort(key=lambda document: document.relevance, reverse=True)\n\n table = []\n tableHeaders = []\n outputData = []\n # print(\"**************** RELEVANCE SCORES ****************\")\n for document in documents:\n outputData.append({'name': document.name[0:30], 'relevance': document.relevance,'path': document.path, 'topTerms': list(document.wordCount.items())[:10]})\n if url or rss or opml: \n table.append([document.name[0:30], document.relevance, document.path])\n tableHeaders = [\"Document\",\"Relevance Score\",\"URL\"]\n elif not verbose:\n table.append([document.name[0:70], document.relevance])\n tableHeaders=[\"Document\",\"Relevance Score\"]\n elif verbose and directory:\n table.append([document.name[0:70], document.relevance, document.tfidf, list(document.wordCount.items())[:10]])\n tableHeaders=[\"Document\",\"Relevance Score\", \"TF-IDF Score\", \"Top Terms\"]\n else:\n table.append([document.name[0:70], document.relevance, list(document.wordCount.items())[:10]])\n tableHeaders=[\"Document\",\"Relevance Score\", \"Top Terms\"]\n\n print(tabulate(table, headers=tableHeaders, tablefmt=\"fancy_grid\"))\n\n # OUTPUT TO FILE\n\n with open(output, 'w', encoding='utf-8') as o:\n dump(outputData, o, indent=3)", "def main():\n wf = WordFrequencies()\n tokens = wf.tokenize(sys.argv[1])\n occurrences = wf.computeWordFrequencies(tokens)\n wf.print(occurrences)", "def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do", "def get_stats(inp_fn):\n ent_pred_cnts = []\n ent_gold_cnts = []\n ent_intr_cnts = []\n ccs = ConllCorefScores()\n jsons = [json.loads(line) for line in open(inp_fn, encoding = \"utf8\")]\n for doc_dict in jsons:\n cur_doc = nlp(\" \".join(doc_dict[\"document\"]))\n gold_clusts = doc_dict[\"gold_clusters\"]\n pred_clusts = doc_dict[\"clusters\"]\n\n gold_ent_clusts = get_entities(cur_doc, gold_clusts)\n pred_ent_clusts = get_entities(cur_doc, pred_clusts)\n\n gold_clusts_pronouns = [gold_clust\n for gold_clust, gold_ent_clust\n in zip(gold_clusts, gold_ent_clusts)\n if contains_pronoun(gold_ent_clust)]\n\n pred_clusts_pronouns = [pred_clust\n for pred_clust, pred_ent_clust\n in zip(pred_clusts, pred_ent_clusts)\n if contains_pronoun(pred_ent_clust)]\n\n update_avg_f1(ccs, gold_clusts_pronouns, pred_clusts_pronouns)\n\n pred_ents = set(map(str, flatten(pred_ent_clusts)))\n gold_ents = 
set(map(str, flatten(gold_ent_clusts)))\n if (not pred_ents) and (not gold_ents):\n #TODO: what's going on in these instances?\n continue\n\n ent_pred_cnts.append(len(pred_ents))\n ent_gold_cnts.append(len(gold_ents))\n ent_intr_cnts.append(len(pred_ents & gold_ents))\n\n ent_micro = micro_f1(ent_pred_cnts, ent_gold_cnts, ent_intr_cnts)\n ent_macro = macro_f1(ent_pred_cnts, ent_gold_cnts, ent_intr_cnts)\n ret_dict = {\"ent_micro\": ent_micro,\n \"ent_macro\": ent_macro,\n \"pronoun_avg_f1\": get_avg_f1(ccs)}\n\n logging.debug(f\"micro = {ent_micro['f1']:.2f}, macro = {ent_macro['f1']:.2f}\")\n\n return ret_dict", "def train(self, corpus): \n for sentence in corpus.corpus:\n last_token = None\n for datum in sentence.data:\n token = datum.word\n self.ntokens += 1\n self.counts[token] += 1\n if last_token:\n self.counts[(last_token, token)] += 1\n last_token = token\n\n for sentence in corpus.corpus:\n last_token = None\n for datum in sentence.data:\n token = datum.word\n if last_token:\n tup = (last_token, token)\n if self.counts[tup]:\n self.s[tup] = math.log(1.0 * self.counts[tup] / self.counts[last_token])\n else: # backing off\n if self.s[token] == 0:\n self.s[token] = math.log(1.0 * (self.counts[token] + 1) / (self.ntokens * 2))\n self.s[tup] = math.log(0.4 * self.s[token])\n last_token = token", "def advancedStats():", "def get_stats(sents):\n import os\n import re \n # first, put the relevant trees into temp file\n if 'outname' in kwargs.keys():\n to_open = 'tmp-%s.txt' % kwargs['outname']\n else:\n to_open = 'tmp.txt'\n with open(to_open, \"w\") as fo:\n for sent in sents:\n statsmode_results['Sentences'] += 1\n fo.write(sent.parse_string.rstrip().encode('utf-8', errors = 'ignore') + '\\n')\n deps = get_deps(sent, dep_type)\n numpass = len([x for x in deps.links if x.type.endswith('pass')])\n statsmode_results['Passives'] += numpass\n statsmode_results['Tokens'] += len(sent.tokens)\n statsmode_results['Words'] += len([w for w in sent.tokens if w.word.isalnum()])\n #statsmode_results['Unique words'] += len(set([w.word.lower() for w in sent.tokens if w.word.isalnum()]))\n #statsmode_results['Unique lemmata'] += len(set([w.lemma.lower() for w in sent.tokens if w.word.isalnum()]))\n\n # count moods via trees (/\\?/ !< __)\n from dictionaries.process_types import processes\n from corpkit.other import as_regex\n tregex_qs = {'Imperative': r'ROOT < (/(S|SBAR)/ < (VP !< VBD !< VBG !$ NP !$ SBAR < NP !$-- S !$-- VP !$ VP)) !<< (/\\?/ !< __) !<<- /-R.B-/ !<<, /(?i)^(-l.b-|hi|hey|hello|oh|wow|thank|thankyou|thanks|welcome)$/',\n #'Open interrogative': r'ROOT < SBARQ <<- (/\\?/ !< __)', \n #'Closed interrogative': r'ROOT ( < (SQ < (NP $+ VP)) << (/\\?/ !< __) | < (/(S|SBAR)/ < (VP $+ NP)) <<- (/\\?/ !< __))',\n 'Unmodalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP !< MD)))',\n 'Modalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP < MD)))',\n 'Open class words': r'/^(NN|JJ|VB|RB)/ < __',\n 'Closed class words': r'__ !< __ !> /^(NN|JJ|VB|RB)/',\n 'Clauses': r'/^S/ < __',\n 'Interrogative': r'ROOT << (/\\?/ !< __)',\n 'Mental processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % as_regex(processes.mental, boundaries = 'w'),\n 'Verbal processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % as_regex(processes.verbal, boundaries = 'w'),\n 'Relational processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % as_regex(processes.relational, boundaries = 'w')}\n\n for name, q in sorted(tregex_qs.items()):\n res = tregex_engine(query = q, \n options = ['-o', '-C'], \n corpus = to_open, \n 
root = root)\n statsmode_results[name] += int(res)\n global numdone\n numdone += 1\n if root:\n root.update()\n if not root:\n tot_string = str(numdone + 1) + '/' + str(total_files * len(tregex_qs.keys()))\n if 'outname' in kwargs.keys():\n tot_string = '%s: %s' % (kwargs['outname'], tot_string)\n animator(p, numdone, tot_string, **par_args)\n if 'note' in kwargs.keys() and kwargs['note'] is not False:\n kwargs['note'].progvar.set((numdone * 100.0 / (total_files * len(tregex_qs.keys())) / denom) + startnum)\n os.remove(to_open)", "def main():\n data_loader = TinyPerformanceLoader()\n data_loader.archive_corpus()", "def train(self, corpus):\n corpus = [w for s in corpus for w in s]\n self.words = Counter(corpus)\n self.letters = Counter()\n for word in corpus:\n self.letters.update(word)\n self.nwords = sum(self.words.values())\n self.nletters = sum(self.letters.values())\n # count(1) returns number of times 1 appears in the list\n self.a = list(self.words.values()).count(1) / self.nwords", "def _write_stats(self, stat_type, user=None, summ_type=None):\n if stat_type == \"full collection\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n self.summary_file.write(\"Number of unique urls: {u}\\nNumber of unique sites: {s}\\n\".format(u=len(set(self.stat_dict['urls'])), s=len(set(self.stat_dict['sites'])))\n )\n site_cnts = Counter(self.stat_dict['sites']).most_common()\n for site in site_cnts:\n self.summary_file.write(\"{s}: {n}\\n\".format(s=site[0], n=site[1]))\n\n if stat_type == \"token_counts\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n for doc_type in self.stat_dict:\n if user is not None:\n self.summary_file.write(\"\\n{0}, {1}\\n\".format(user, summ_type))\n\n self.summary_file.write(\n \"\\nNumber of {d}s: {p}\\nAverage tokens/{d}: {t}\\nAverage sentences/{d}: {s}\\n\".format(\n d=doc_type, p=len(self.stat_dict[doc_type][0]), t=sum(self.stat_dict[doc_type][1])/len(self.stat_dict[doc_type][1]), s=sum(self.stat_dict[doc_type][0])/len(self.stat_dict[doc_type][0])\n )\n )\n\n self.summary_file.write(\n \"Median tokens/{d}: {p}\\nStandard deviation tokens/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][1]), t=np.std(self.stat_dict[doc_type][1])\n )\n )\n\n self.summary_file.write(\n \"Median sentences/{d}: {p}\\nStandard deviation sentences/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][0]), t=np.std(self.stat_dict[doc_type][0])\n )\n )", "def stats_preprocessing(self):\n output = {'before_tot':[],\n 'before_unique':[],\n 'after_tot':[],\n 'after_unique':[]}\n for i in range(len(self.table)):\n description_raw = self.table.description.iloc[i].split(' ')\n clean_txt = self.table.clean_text.iloc[i].split(' ')\n\n output['before_tot'].append(len(description_raw))\n output['before_unique'].append(len(set(description_raw)))\n output['after_tot'].append(len(clean_txt))\n output['after_unique'].append(len(set(clean_txt)))\n \n print(\"\"\"Before preprocessing a description had on average {0} words with standard deviation {1}. \\n\nMoreover, the average of unique words was {2} and the standard deviation {3}.\"\"\"\\\n .format(round(mean(output['before_tot']), 2), round(stdev(output['before_tot']), 2), \n round(mean(output['before_unique']), 2), round(stdev(output['before_unique'])), 2))\n \n print(\"\"\"\\nAfter preprocessing a description has on average {0} words with standard deviation {1}. 
\\n \nThe average of unique words is now {2} and the standard deviation {3}.\"\"\"\\\n .format(round(mean(output['after_tot']), 2), round(stdev(output['after_tot']), 2), \n round(mean(output['after_unique']),2), round(stdev(output['after_unique']), 2)))\n\n return output", "def corpus_size():\n return ix.doc_count()", "def train(self, corpus): \n for sentence in corpus.corpus: # iterate over sentences in the corpus\n for token in sentence: # iterate over datums in the sentence\n self.unigrams[token] += 1\n self.total += 1\n V = len(self.unigrams) # vocabulary size \n for ug,count in self.unigrams.iteritems():\n \tself.f1[ug] = math.log10(count+1) - math.log10(self.total + V)" ]
[ "0.75794345", "0.7495605", "0.73669267", "0.6809367", "0.6783773", "0.6437376", "0.63469523", "0.6233378", "0.6222066", "0.619572", "0.6162007", "0.6081259", "0.60806775", "0.6079064", "0.6075563", "0.6050363", "0.6045781", "0.59997946", "0.5998121", "0.59935886", "0.5982539", "0.59763074", "0.59655064", "0.59523976", "0.5933469", "0.5926871", "0.5925383", "0.59080315", "0.5905814", "0.58933276" ]
0.7527568
1
Main function, orchestrates the pipeline. Creates the spark context, parses arguments, and parses all users.
def main():
    sc = pyspark.SparkContext(conf=sparkConf())
    sql = pyspark.SQLContext(sc)
    args = parse_args()
    cleanOutputDir(args.output)
    users = os.listdir(args.input)
    map(lambda user: parseUser(user, args, sql, args.partitions), users)
    corpora_stats(args.output)
    append_corpus(args.output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cli():\n # Configuration\n AppConfig()\n\n # Parse the cli arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('standard_data_path', help='path to the standard data directory')\n parser.add_argument('queue', help='job queue')\n parser.add_argument('--app-name', help='spark application name which must contain the application prd',\n default='gmt00-diaman-ai')\n parser.add_argument('--driver-mem', help='amount of memory to use for the driver process',\n default='4g')\n parser.add_argument('--driver-cores', help='number of cores to use for the driver process',\n default=1)\n parser.add_argument('--executor-mem', help='amount of memory to use per executor process',\n default='8g')\n parser.add_argument('--executor-cores', help='number of cores to use on each executor',\n default=4)\n parser.add_argument('--min-executors', help='minimum number of executors to run if dynamic allocation is enabled',\n default=4)\n parser.add_argument('--max-executors', help='maximum number of executors to run if dynamic allocation is enabled',\n default=12)\n parser.add_argument('--ini-executors', help='initial number of executors to run if dynamic allocation is enabled',\n default=4)\n args = parser.parse_args()\n\n # Instantiate spark\n _, spark_session = spark_config.get_spark(app_name=args.app_name,\n queue=args.queue,\n driver_mem=args.driver_mem,\n driver_cores=args.driver_cores,\n executor_mem=args.executor_mem,\n executor_cores=args.executor_cores,\n min_executors=args.min_executors,\n max_executors=args.max_executors,\n ini_executors=args.ini_executors)\n\n # Run the train pipeline\n train_pipeline.run(spark_session, args.standard_data_path)", "def main():\n\n print(\"Initiating Spark session...\")\n print('-' * 50)\n spark = create_spark_session()\n \n # Use these settings if you want to test on the full\n # dataset, but it takes a LONG time.\n song_input_data = config['AWS']['SONG_DATA']\n log_input_data = config['AWS']['LOG_DATA']\n \n # Uncomment the two lines if you want to test on\n # minimal data\n #song_input_data = config['AWS']['SINGLE_SONG_DATA']\n #log_input_data = config['AWS']['SINGLE_LOG_DATA']\n \n output_data = config['AWS']['OUTPUT_DATA']\n \n print('-' * 50)\n print(\"Processing song data...\")\n print('-' * 50)\n print('')\n process_song_data(spark, song_input_data, output_data)\n \n print('-' * 50) \n print(\"Processing log data...\")\n print('-' * 50)\n print('')\n process_log_data(spark, song_input_data, log_input_data, output_data)", "def main():\n # start Spark application and get Spark session, logger and config\n spark = SparkSession \\\n .builder \\\n .appName(\"PokemonBasicETLOperations\") \\\n .config(\"spark.eventLog.enabled\", True) \\\n .enableHiveSupport() \\\n .getOrCreate()\n\n print('PokemonBasicETLOperations ETL is up-and-running')\n \n # execute ETL pipeline\n pokemon = extract(spark)\n max_attack_per_type,agg_legend_poke,special_criteria_poke = transform(pokemon)\n load(max_attack_per_type,agg_legend_poke,special_criteria_poke)\n\n print('PokemonBasicETLOperations ETL job is finished')\n spark.stop()\n return None", "def execute(self, context):\n\n self._hook = SparkSubmitHook(\n conf=self._conf,\n conn_id=self._conn_id,\n ssh_conn_id=self._ssh_conn_id,\n files=self._files,\n py_files=self._py_files,\n driver_classpath=self._driver_classpath,\n jars=self._jars,\n java_class=self._java_class,\n packages=self._packages,\n exclude_packages=self._exclude_packages,\n repositories=self._repositories,\n 
total_executor_cores=self._total_executor_cores,\n executor_cores=self._executor_cores,\n executor_memory=self._executor_memory,\n driver_memory=self._driver_memory,\n keytab=self._keytab,\n principal=self._principal,\n name=self._name,\n num_executors=self._num_executors,\n application_args=self._application_args,\n env_vars=self._env_vars,\n verbose=self._verbose,\n dataeng_spark=self.dataeng_spark,\n dataeng_spark_pyenv_path=self.dataeng_spark_pyenv_path\n\n )\n self._hook.submit(self._application)", "def main():\n # Initiate Spark Session\n spark = create_spark_session()\n \n # Data files\n # Root Data Path\n # Uncomment below line for AWS S3\n #input_data = \"s3a://udacity-dend\"\n # Uncomment below line for local files\n input_data = \"data\"\n\n # Warehouse\n # Root WH\n # Uncomment below line for AWS S3\n #output_data = \"s3a://jerryespn-project-out\"\n # Uncomment below line for local files\n output_data = \"spark-warehouse\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def main():\n spark_it_up()", "def main():\n spark = create_spark_session()\n\n input_data = config['STORAGE']['INPUT_DATA']\n output_data = config['STORAGE']['OUTPUT_DATA']\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)", "def main():\n spark = create_spark_session()\n\n # Used for local testing - commented out\n # input_data = \"./data/\"\n # output_data = \"./data/\"\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://allen-lesson4-datalake-bucket/\"\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)\n spark.stop()", "def main():\n spark = create_spark_session()\n logging.info('Spark Session created')\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://dend-emr-project\"\n #input_data = './data/'\n #output_data = '/Users/daniel/Desktop/output/'\n logging.info(f'Set input path to {input_data}')\n logging.info(f'Set output path to {output_data}')\n \n copy_raw_data(spark, input_data, output_data)\n\n s3_data = restore_data_from_s3(spark, output_data)\n \n sas_desc_string = load_sas_desc_file(input_data)\n \n process_fact_table(spark, s3_data, output_data, sas_desc_string)\n \n process_dim_tables(spark, s3_data, output_data, sas_desc_string)\n\n data_quality_check(spark, output_data)\n \n logging.info('ETL process successfully finished.')", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"data/analytics\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def main():\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n get_user_config()", "def main():\n spark = create_spark_session()\n\n input_data = \"s3a://udacitydenanodegree2020/\"\n output_data = \"s3a://udacitydenanodegree2020/output/\"\n\n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def main(): \n spark = create_spark_session()\n print(\"Spark Session Created\")\n\n #Print S3 bucket location\n s3_bucket=os.environ[\"s3_bucket\"]\n s3_bucket = s3_bucket.replace(\"'\", \"\")\n \n print (s3_bucket)\n \n #Invoke Functions to process data\n process_data(spark, s3_bucket)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-data_dir\", required=True, help=\"Directory containing original data set in requisite folder structure (small 
part or all data)\")\n parser.add_argument(\"-features_filename\", required=True, help=\"Features cloudpickle file that provides that pruning information\")\n parser.add_argument(\"-start_seed\", type=int, default=1284171779)\n parser.add_argument(\"-num_datasets\", type=int, default=20)\n parser.add_argument(\"-modes\", choices=[PREPROCESS, TRAIN, EVALUATE], nargs=\"+\", required=True)\n args = parser.parse_args()\n return pipeline(args)", "def main() -> None:\n ROOT_DIR = dirname(abspath(__file__))\n spark = create_spark_session()\n input_data = 's3a://udacity-dend/'\n output_data = ROOT_DIR + '/data/'\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://udacity-nanodegree-data-engineer/\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def main():\n my_emr = EmrProcessing()\n\n if \"-s\" in sys.argv:\n my_emr.verbose_mode = False\n else:\n my_emr.verbose_mode = True\n print \"\\nStarting Titanic Data Analysis\"\n my_emr.parse_user_selections()\n\n # Setup\n my_emr.clear_local_output_directory()\n my_emr.update_mapper_file(\"model2\")\n\n # S3 activities\n my_emr.empty_bucket()\n my_emr.create_and_fill_bucket()\n\n # EMR activities\n my_emr.setup_and_run_job()\n my_emr.wait_until_job_completes()\n\n # Cleanup\n my_emr.download_output_files()\n my_emr.post_process_output_file()\n if my_emr.verbose_mode:\n my_emr.print_local_output_files_stats()", "def main(self, sc: SparkContext, *args: Any):\n experiment_parquet_path = args[0]\n mouse_parquet_path = args[1]\n embryo_parquet_path = args[2]\n impress_parquet_path = args[3]\n output_path = args[4]\n spark = SparkSession(sc)\n experiment_normalized_df = self.cross_reference_experiments(\n spark,\n experiment_parquet_path,\n mouse_parquet_path,\n embryo_parquet_path,\n impress_parquet_path,\n )\n experiment_normalized_df.write.mode(\"overwrite\").parquet(output_path)", "def main():\n args = parse_args()\n process_args(args)", "def main():\n # create a Spark session\n spark = create_spark_session()\n\n # set input & output data locations\n input_data = \"data/\"\n output_data = \"results/\"\n\n # Gather/read the datasets\n df_visits = spark.read.parquet(\"data/immigration_data\")\n df_demo = spark.read.csv(\"data/us-cities-demographics.csv\", sep=\";\", header=True)\n df_airports = spark.read.csv(\"data/airport-codes_csv.csv\", header=True)\n df_airport_codes = get_airport_codes(spark)\n df_countries = get_countries(spark)\n df_states = get_states(spark)\n df_visa = get_visa(spark)\n\n # clean the datasets\n df_airports_clean = clean_airport_codes(spark,df_airports)\n df_demo_clean= clean_demographics(spark,df_demo)\n df_visits_clean = clean_immigration_data(spark, df_visits, df_airport_codes, df_countries, df_states, df_visa)\n\n # load the fact and dimensions in parquet files\n load_dimensions(output_data, df_countries, df_states, df_visa, df_demo_clean, df_airports_clean)\n load_fact(spark,output_data, df_visits_clean)\n\n # run validation checks\n validate_dimensions(spark,['dim_visa','dim_state','dim_country','dim_us_demo','dim_airports'],output_data)\n validate_fact(spark,'fact_visits',output_data)", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://udacity-data-lake/output/\"\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, 
input_data, output_data)", "def main() -> None:\n jobStatus = list()\n adt = AuditManager(\n config.get(\"Audit\").get(\"database\"),\n config.get(\"Audit\").get(\"user\"),\n config.get(\"Audit\").get(\"password\"),\n config.get(\"Audit\").get(\"host\"),\n config.get(\"Audit\").get(\"port\"),\n )\n jobMeta = adt.getStepLogData()\n adt.closeConnection()\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=config.get(\"spark\").get(\"parallelJobs\", 2)\n ) as executor:\n spark_jobs = {\n executor.submit(processFile, fileMeta): fileMeta for fileMeta in jobMeta\n }\n for status in concurrent.futures.as_completed(spark_jobs):\n fileStatus = status.result()\n jobStatus.append(fileStatus)\n logger.info(jobStatus)", "def setUp(self):\n self.spark, self.log, self.config = start_spark(app_name = \"test_etl_job\",\n files='configs/etl_config.json')", "def main(input_dir, output):\n\tspark = create_spark_session()\n\tprocess_header_data(spark, input_dir, output)", "def main(arguments):\n auth = (arguments['username'], arguments['token'])\n data_collector = DataCollector(arguments['repo name'],\n arguments['start date'],\n arguments['end date'], auth,\n arguments['all'], arguments['page'])\n data_collector.collect_signals()", "def define_and_process_args():\n\n description = main.__doc__\n formatter_class = argparse.ArgumentDefaultsHelpFormatter\n parser = argparse.ArgumentParser(description=description,\n formatter_class=formatter_class)\n\n parser.add_argument('--data_dir', default='~/Data/JIGSAWS/Suturing',\n help='Data directory.')\n parser.add_argument('--data_filename', default='standardized_data.pkl',\n help='''The name of the standardized-data pkl file that\n resides in data_dir.''')\n parser.add_argument('--test_users', default='B',\n help='''A string of the users that make up the test set,\n with users separated by spaces.''')\n\n parser.add_argument('--model_type', default='BidirectionalLSTM',\n help='''The model type, either BidirectionalLSTM,\n ForwardLSTM, or ReverseLSTM.''')\n parser.add_argument('--num_layers', type=int, default=1,\n help='The number of hidden layers.')\n parser.add_argument('--hidden_layer_size', type=int, default=1024,\n help='The number of hidden units per layer.')\n parser.add_argument('--dropout_keep_prob', type=float, default=0.5,\n help='''The fraction of inputs to keep whenever dropout\n is applied.''')\n\n parser.add_argument('--batch_size', type=int, default=5,\n help='The number of sequences in a batch/sweep.')\n parser.add_argument('--num_train_sweeps', type=int, default=600,\n help='''The number of training sweeps. A sweep\n is a collection of batch_size sequences that\n continue together throughout time until all\n sequences in the batch are exhausted. Short\n sequences grow by being wrapped around in\n time.''')\n parser.add_argument('--initial_learning_rate', type=float, default=1.0,\n help='The initial learning rate.')\n parser.add_argument('--num_initial_sweeps', type=int, default=300,\n help='''The number of initial sweeps before the\n learning rate begins to decay.''')\n parser.add_argument('--num_sweeps_per_decay', type=int, default=50,\n help='''The number of sweeps per learning-rate decay,\n once decaying begins.''')\n parser.add_argument('--decay_factor', type=float, default=0.5,\n help='The multiplicative learning-rate-decay factor.')\n parser.add_argument('--max_global_grad_norm', type=float, default=1.0,\n help='''The global norm is the norm of all gradients\n when concatenated together. 
If this global norm\n exceeds max_global_grad_norm, then all gradients\n are rescaled so that the global norm becomes\n max_global_grad_norm.''')\n\n parser.add_argument('--init_scale', type=float, default=0.1,\n help='''All weights will be initialized using a\n uniform distribution over\n [-init_scale, init_scale].''')\n parser.add_argument('--num_sweeps_per_summary', type=int, default=7,\n help='''The number of sweeps between summaries. Note:\n 7 sweeps with 5 sequences per sweep corresponds\n to (more than) 35 visited sequences, which is\n approximately 1 epoch.''')\n parser.add_argument('--num_sweeps_per_save', type=int, default=7,\n help='The number of sweeps between saves.')\n\n args = parser.parse_args()\n args.data_dir = os.path.expanduser(args.data_dir)\n args.test_users = args.test_users.split(' ')\n return args", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--config_file',\n dest='config_file',\n required=True,\n default=None,\n help='JSON config file.')\n args = parser.parse_args()\n\n config = Config(args.config_file)\n\n logger.info(\"Executing preprocessing pipeline...\")\n run_pipeline(config)\n logger.info(\"Done.\")", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend\"\n output_data = \"s3a://vivek1bucket\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://dend-bucket-cpm/\"\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)", "def spark(self, *args, **kwargs):\n self.spark_submit(*args, **kwargs)" ]
[ "0.66675025", "0.653914", "0.64703673", "0.6123042", "0.61154336", "0.6108758", "0.60041773", "0.5920403", "0.5776751", "0.5748296", "0.57471836", "0.57071954", "0.5682872", "0.5638785", "0.56291026", "0.5625207", "0.5623799", "0.56191224", "0.56022197", "0.5595379", "0.5585689", "0.5532733", "0.5504484", "0.5433838", "0.54293984", "0.54289067", "0.54266405", "0.54199994", "0.5398467", "0.5395869" ]
0.72176826
0
Checks if a session exists for the given sender id, else create a new one
def find_session(sender_id):
    session = db.sessions.find_one({'sender_id': sender_id})
    if session is None:
        session_id = str(uuid.uuid4())
        db.sessions.insert_one({'createdAt': datetime.datetime.utcnow(),
                                'sender_id': sender_id,
                                'session_id': session_id})
    else:
        session_id = session['session_id']
    return session_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_session(self):\n body = yield from self._fetch_json(URL_LOGIN, self._new_session_data)\n self.sma_sid = jmespath.search('result.sid', body)\n if self.sma_sid:\n return True\n\n msg = 'Could not start session, %s, got {}'.format(body)\n\n if body.get('err'):\n if body.get('err') == 503:\n _LOGGER.error(\"Max amount of sesions reached\")\n else:\n _LOGGER.error(msg, body.get('err'))\n else:\n _LOGGER.error(msg, \"Session ID expected [result.sid]\")\n return False", "def test_create_session(self):\n finder = FinderInsidePro(self.test_key)\n session_id = finder.create_session(2811)\n assert isinstance(session_id, str)\n assert session_id == finder.session_id\n assert len(session_id)", "def _insert_new_session():\n request = self._make_request()\n session_existing = self._set_up_session_in_Redis_and_makeOne( # noqa: F841\n request, session_id, session_dict={\"visited\": True}, **session_args\n )\n return request", "def make_new_session():\n session = Session.objects.create(uuid=str(uuid4()), container_id=None)\n return session.id", "def create_session(self,session_id,host_id,host_name,spotify_token):\n self.sessions[session_id] = {\n \"HOST\" : {\n \"ID\" : host_id,\n \"NAME\" : host_name,\n \"spotify_token\" : spotify_token,\n \"spotify_player\": None,\n },\n \"queue\" : [],\n \"queue_lock\" : False,\n \"current_track\" : \"\",\n \"previous_track\" : \"\",\n \"USERS\" : {}\n }", "def check_session(session_id):\n return session_cache.hget(session_id)", "def join_session(self, information, player):\n try: # if input of int() is not convertible to integer it throws an error\n req_ses_id = int(information.split(protocol._MSG_FIELD_SEP)[1])\n except ValueError:\n print(\"session id is not int convertible: %s\" % information.split(protocol._MSG_FIELD_SEP))\n return # TODO: appropriate error to user\n\n for session in self.current_sessions:\n if session.game_id == req_ses_id:\n break\n self.__lock.acquire()\n player.current_session_id = session.game_id\n joined_session = session.add_player(player)\n # TODO: some mysterious behavior observed here. 
couldn't reproduce it [Novin]\n print(\"player added to current session!\")\n self.__lock.release()\n if joined_session:\n return session\n else:\n return None", "def test_unique_id(self):\n session1 = _create_test_session()\n session2 = _create_test_session()\n self.assertNotEqual(session1.id, session2.id)", "async def create_session(session: SessionModel, mongo: MongoDB = mongodb) -> SessionOutModel:\n if not await mongo.session_coll.find_one({\"id\": session.id}):\n await mongo.session_coll.insert_one(session.dict())\n else:\n await mongo.session_coll.update_one({\"id\": session.id}, {'$set': {'status': session.status}})\n return SessionOutModel(**session.dict())", "def _createSessionObject(self, request):\n # Preload necessary data items\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n user_id = user.email()\n # Get the conference entity\n conf = _getEntityByWebsafeKey(request.websafeConferenceKey,\n 'Conference')\n # Ensure that the current user is the conference organizer\n if user_id != conf.organizerUserId:\n raise endpoints.UnauthorizedException(\n 'Only the conference organizer can create a new session')\n # Verify that the speaker exists\n speaker = _getEntityByWebsafeKey(request.websafeSpeakerKey, 'Speaker')\n # Ensure that the user submitted the required name property\n if not request.name:\n raise endpoints.BadRequestException(\n \"Session 'name' field required\")\n # Copy SessionForm/ProtoRPC Message into dict\n data = {\n field.name: getattr(request, field.name) for field in\n request.all_fields()\n }\n # Remove data that isn't destined for the Session entity\n del data['websafeConferenceKey']\n del data['websafeSpeakerKey']\n del data['websafeKey']\n # Add default values for those missing in the data model\n for df in SESSION_DEFAULTS:\n if data[df] in (None, []):\n data[df] = SESSION_DEFAULTS[df]\n # Ensure the string version of typeOfSession is what is stored\n # in the NDB model\n data['typeOfSession'] = str(data['typeOfSession'])\n # Convert date from string to Date object\n if data['date'] is not None:\n try:\n data['date'] = datetime.strptime(\n data['date'][:10], '%Y-%m-%d').date()\n except:\n raise endpoints.BadRequestException(\n \"Invalid 'date' value\")\n # Convert startTime from string to Time object\n if data['startTime'] is not None:\n try:\n data['startTime'] = datetime.strptime(\n data['startTime'], '%H:%M').time()\n except:\n raise endpoints.BadRequestException(\n \"Invalid 'startTime' value\")\n # Create Session\n session = Session(**data)\n session.conference = conf.key\n session.speaker = speaker.key\n session.put()\n # Add the session key to the speaker's sessions list\n speaker.sessions.append(session.key)\n speaker.put()\n # Add a task to task queue which checks if the speaker of this session\n # should be the new featured speaker\n taskqueue.add(params={'websafeSpeakerKey': request.websafeSpeakerKey,\n 'websafeConferenceKey': request.websafeConferenceKey},\n url='/tasks/update_featured_speaker'\n )\n # Return SessionForm object\n return self._copySessionToForm(session)", "def create_inactive_session_from_invitation(self, setup_id, user_id):\n session = Session.objects.create(setup_id=Setup.objects.get(id=setup_id), user_id=User.objects.get(id=user_id),\n status=\"inactive\")", "def joined(self, channel):\n # find or make a session. 
\n ss = self.findSessions(channel)[0]\n if ss.isDefaultSession: # i.e., not found\n channel = channel.decode(self.serverEncoding)\n ss = self.store.find(d20session.D20Session,\n d20session.D20Session.name == channel).one()\n\n if ss is None:\n ss = d20session.D20Session()\n ss.name = channel.decode(ss.encoding)\n self.store.add(ss)\n Store.of(ss).commit()\n\n self.sessions.append(ss)\n\n self.responding = 1", "def add_session(self, session):\n with self._sessions_lock:\n if session.session_id in self.sessions:\n raise KeyError(\"non-unique session id %s for %s\" % (session.session_id, session))\n self.sessions[session.session_id] = session\n\n return session", "def new_session(self, information, current_player):\n game_name = information.split(protocol._MSG_FIELD_SEP)[1]\n\n # if not self.__is_name_valid(game_name):\n # return None # TODO: be more informative on reasons to client\n\n max_num_of_players = information.split(protocol._MSG_FIELD_SEP)[2]\n # if max_num_of_players < 1 or max_num_of_players > 100:\n # return None # TODO: be more informative to client\n\n s_id = len(self.current_sessions ) + 1\n current_player.current_session_id = s_id\n session = Session(protocol._PENDING, s_id, game_name,\n self.sudoku_name,\n self.sudoku_sol,\n # 'sudoku/puzzles/sudoku_easy_1.csv',\n # 'sudoku/puzzles/sudoku_easy_1_solution.csv',\n max_num_of_players,\n [current_player])\n session.game_start()\n\n self.__lock.acquire()\n self.current_sessions.append(session)\n self.__lock.release()\n return session", "def test_newSession(self):\n session = self.mdk.session()\n session2 = self.mdk.session()\n self.assertSessionHas(session, session._context.traceId, [0])\n self.assertSessionHas(session2, session2._context.traceId, [0])\n self.assertNotEqual(session._context.traceId,\n session2._context.traceId)", "def add_event(self, event, timestamp):\n if event.name == 'check_open':\n new_event_end = -1\n else:\n new_event_end = timestamp + event.timeout\n\n is_created = False\n if self.session \\\n and (self.session.end > timestamp or self.session.end == -1):\n if event.name == 'check_close' and self.session.end < timestamp:\n self.session.end = timestamp\n if event.name == 'check_open':\n self.session.end = -1\n if (self.session.end > timestamp\n and self.session.end < new_event_end):\n self.session.end = new_event_end\n return is_created\n\n is_created = True\n self.session = Session(uuid4(), timestamp, new_event_end)\n self.all_sessions.append(self.session)\n \n return is_created", "def is_valid_session(self, session):\n return self.identifier.startswith(session)", "def _get_by_sid(self, sid):\n if self._is_valid_sid(sid):\n data = self.session_model.get_by_sid(sid)\n if data is not None:\n self.sid = sid\n logging.info(sid)\n logging.info(sessions.SessionDict(self, data=data))\n return sessions.SessionDict(self, data=data)\n logging.info('new')\n self.sid = self._get_new_sid()\n return sessions.SessionDict(self, new=True)", "def test_duplicate_user(self, mapp, existing_user_id):\n\n mapp.create_user(user=existing_user_id, password=1234,\n email=existing_user_id + \"@example.com\", code=409)", "def get_session_by_id(self, name, id):\n cls, pending, connected = self._proxies[name]\n for session in pending:\n if session.id == id:\n return session\n for session in connected:\n if session.id == id:\n return session", "def create(id = None, expires=None):\n\n\t# Init the data\n\tdData = {}\n\n\t# If we have an expires time\n\tif expires:\n\t\tdData['__expire'] = expires\n\n\t# Create a new Session using a UUID 
as the id\n\treturn _Session(id and id or uuid.uuid4().hex, dData)", "def create_session(self, _id: int = None, _cls: type[Session] = Session) -> Session:\n if not _id:\n _id = 1\n while _id in self.sessions:\n _id += 1\n session = _cls(_id, config=self.config)\n session.service_manager = self.service_manager\n logger.info(\"created session: %s\", _id)\n self.sessions[_id] = session\n return session", "def test_create_session(self):\n study_id = self.storage.create_study(sample_study_spec())\n\n session = sample_session(study_id=study_id)\n self.storage.create_session(session)\n\n self.assertEqual(self.storage.get_session(study_id, session.id), session)", "def session(self, sid):\n s = self.list\n if sid not in s:\n for k in s:\n if s[k]['uuid'] == sid:\n if s[k]['type'] == 'meterpreter':\n return MeterpreterSession(k, self.rpc, s)\n elif s[k]['type'] == 'shell':\n return ShellSession(k, self.rpc, s)\n raise KeyError('Session ID (%s) does not exist' % sid)\n if s[sid]['type'] == 'meterpreter':\n return MeterpreterSession(sid, self.rpc, s)\n elif s[sid]['type'] == 'shell':\n return ShellSession(sid, self.rpc, s)\n raise NotImplementedError('Could not determine session type: %s' % s[sid]['type'])", "def test_create_with_duplicate_userid(self):\n\n self.sdkapi.guest_create(self.userid, 1, 1024)\n try:\n self.sdkapi.guest_create(self.userid, 1, 1024)\n except exception.SDKSMUTRequestFailed as err:\n self.assertEqual(err.results['rc'], 400)\n self.assertEqual(err.results['rs'], 8)", "def same_user(user_id):\n return user_id == login_session['user_id']", "def create_single_sign_on_session(remote_ip, auth_user, secure=True):\n # must define groups but not populating at the moment !!!\n groups = []\n\n # Successful authentication and access verification, create a session and return.\n cherrypy.log.error(\"++ create_single_sign_on_session creating session for %s\" % auth_user)\n sid = uuid.uuid4().hex\n session = {\"created\": datetime.datetime.utcnow(), \"creator\": auth_user}\n with slycat.web.server.database.couchdb.db_lock:\n clean_up_old_session(auth_user)\n database = slycat.web.server.database.couchdb.connect()\n \n database.save({\"_id\": sid, \"type\": \"session\", \"created\": str(session[\"created\"].isoformat()), \"creator\": str(session[\"creator\"]),\n 'groups': groups, 'ip': remote_ip, \"sessions\": [], \"last-active-time\": str(session[\"created\"].isoformat())})\n\n cherrypy.response.cookie[\"slycatauth\"] = sid\n cherrypy.response.cookie[\"slycatauth\"][\"path\"] = \"/\"\n if secure:\n cherrypy.response.cookie[\"slycatauth\"][\"secure\"] = 1\n cherrypy.response.cookie[\"slycatauth\"][\"httponly\"] = 1\n timeout = int(cherrypy.request.app.config[\"slycat\"][\"session-timeout\"].total_seconds())\n cherrypy.response.cookie[\"slycatauth\"][\"Max-Age\"] = timeout\n cherrypy.response.cookie[\"slycattimeout\"] = \"timeout\"\n cherrypy.response.cookie[\"slycattimeout\"][\"path\"] = \"/\"\n cherrypy.response.cookie[\"slycattimeout\"][\"Max-Age\"] = timeout\n\n cherrypy.response.status = \"200 OK\"\n cherrypy.request.login = auth_user", "def get_session(session_id):\n response_dict = {}\n if request.method == 'POST' and request.json:\n # First Time creation\n # with or without json data\n # session_id = request.json.get('session_id')\n if not session_id:\n return return_response({\"message\": \"Something is missing, \"\n \"read the API docs for \"\n \"more information.\"}, 403)\n if is_active_session(session_id):\n return return_response({\"message\": \"Conflict, ID already exists. 
Use PUT instead of POST.\"}, 409)\n if request.json:\n update_or_create_session(session_id=session_id, data=request.json.get('data'))\n response_dict['ok'] = True\n elif request.method == 'PUT' and request.json:\n # Updating information in session\n if not session_id:\n return return_response({\"message\": \"Something is missing, \"\n \"read the API docs for \"\n \"more information.\"}, 403)\n if request.json:\n update_or_create_session(session_id=session_id, data=request.json.get('data'))\n response_dict['ok'] = True\n elif request.method == 'GET':\n # Getting information for a session_id or get new random session_id\n if session_id is None:\n response_dict['session_id'] = generate_random_session_id()\n else:\n data = get_session_data(session_id=session_id)\n if data is not None:\n response_dict = {'data': data, 'ok': True}\n else:\n return return_response({\"message\": \"ID does not exists\"}, 404)\n else:\n pass\n\n return return_response(response_dict)", "def enter_contest(self, message, db_session):\n username = self.ts.get_user(message)\n user = db_session.query(db.User).filter(db.User.name == username).one_or_none()\n if user:\n print('user found')\n if user.entered_in_contest:\n self._add_to_whisper_queue(user.name, 'You\\'re already entered into the contest, you can\\'t enter again.')\n else:\n user.entered_in_contest = True\n self._add_to_whisper_queue(user.name, 'You\\'re entered into the contest!')\n else:\n print('user created')\n user = db.User(entered_in_contest=True, name=username)\n # user.name = username\n db_session.add(user)\n print(user.name)\n self._add_to_whisper_queue(username, 'You\\'re entered into the contest!')", "async def create(self, session, *, dc=None):\n response = await self._api.put(\n \"/v1/session/create\",\n data=session,\n params={\"dc\": dc})\n return response.body" ]
[ "0.61570853", "0.58773726", "0.58149797", "0.56339175", "0.56324416", "0.5605456", "0.5528951", "0.5513518", "0.54047126", "0.5401269", "0.53964466", "0.53947234", "0.5350365", "0.53205097", "0.53166515", "0.5314262", "0.5306703", "0.52829957", "0.52487093", "0.52188724", "0.5213756", "0.52015024", "0.5195303", "0.51923895", "0.51413393", "0.51205033", "0.51068586", "0.50938016", "0.50840944", "0.50828445" ]
0.7605206
0
Concatenates all messages in the session
def concatenate_session(session_id):
    conversation = ""
    for msg in db.messages.find({'session_id': session_id}):
        conversation += (msg['message'] + "\n")
    return conversation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_message(): \n return \"<br>\".join(messages)", "def consolidate_messages(self, msg):", "def send_messages(messages):\n while messages:\n msg = messages.pop()\n sent_messages.append(msg)", "def merge_messages(self, msg_list):\n return self.recv_instance.empty_msg.join(msg_list)", "def send_all(self, msg):\n self.update_chats()\n for c in self.chats:\n self.send_message(msg, c)", "def send_messages(messages, sent_messages):\n while messages:\n current_message = messages.pop()\n print(f\"Sending message: {current_message}\")\n sent_messages.append(current_message)", "def get_append_messages(self):\n\t\treturn self._appendMessages", "def message_all(self, message):\n # We copy the _clients into a list to avoid dictionary changing\n # size during iteration.\n for character in self.players.values():\n character.message(message)", "def joined(message):\n global list_messages\n room = session.get('room')\n join_room(room)\n print ('joined session list_messages ' + str(len(list_messages)) + ' , session ' + str(session) +'\\n')\n emit('status', {'msg': str(clients)})\n for x in list_messages:\n emit('status', {'msg': x})\n emit('status', {'msg': session.get('name') + ' has entered the room.'}, room=room)", "def get_all(self):\n request = get_current_request()\n messages = []\n for queue in self.queues:\n for peeked in request.session.peek_flash(queue):\n messages.append({'message': peeked, 'queue': queue,})\n request.session.pop_flash(queue)\n return messages", "def text(message):\n global list_messages\n room = session.get('room')\n msg = session.get('name') + ':' + message['msg']\n list_messages.append(msg)\n addNewMsg(message,session)\n print ('size of list_messages ' + str(len(list_messages)) + ', session ' + str(session))\n emit('message', {'msg': msg}, room=room)", "def all_messages(self):\n request = {'token': self.token, 'include_received': True, 'include_read': True, 'include_sent': True}\n return Session.send_request('messages', request, Session.FULL_RESPONSE_OR_NONE)", "def merge(session_id, context, entities, msg):\n pass", "def get_and_delete_messages (self):\n return []", "def get_and_delete_messages (self):\n return []", "def characters(self, message):\n self._message = self._message + message", "def send_all(self,\n message: bytes\n ) -> None:\n\n self.log_to_debug(\n line=f\"Send_All: {message}\"\n )\n for user_key in self.connected_users:\n\n if self.connected_users[user_key]['authorized']:\n protocol = self.connected_users[user_key]['main']['base'].transport.protocol\n protocol.sendLine(\n line=message\n )", "def multiple_messages(self, messages):\n for message in messages:\n cmd = '{}serverMessage \"{}\"'.format(self.console, Commands.aquote(message))\n self.write_command(cmd)", "def bus_messages(self):\n\n output = []\n for message in self.__bus_messages:\n if time.time() - message['time'] > BusController.MESSAGES_TTL:\n self.__bus_messages.remove(message)\n output.append(f\"l{message['sender'].line_num}-s{message['sender'].station_num} sent: {message['text']}\")\n while len(output)<BusController.MAX_MESSAGES_TO_DISPLAY:\n output.append(\"\")\n return output", "def list_messages(self):", "def __flush(self):\n self.session.buffer=''\n self.session.before=''", "def display_messages(self):\n\n\t\twhile self.joined:\n\t\t\tif len(self.messages) != 0:\n\t\t\t\tfor msg in self.messages:\n\t\t\t\t\t#: If the message is empty, ignore it.\n\t\t\t\t\tif msg == \"\":\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t#: If the message is close\", then the server has told the 
client\n\t\t\t\t\t#: to shut down, so it will. This is not an issue, as users\n\t\t\t\t\t#: messages will always have an identifier and : before their\n\t\t\t\t\t#: message, thus,the only messages that don't include an\n\t\t\t\t\t#: identifier will be from the server itself.\n\t\t\t\t\telif msg[:5] == \"close\":\n\n\t\t\t\t\t\treason = msg[6:]\n\n\t\t\t\t\t\tprint(\"This client was closed due to {}.\".format(reason))\n\t\t\t\t\t\tself.quit(True)\n\n\t\t\t\t\t#: Otherwise, print the message to the commandline.\n\t\t\t\t\telif not self.silent:\n\t\t\t\t\t\tprint('\\r' + msg, end='')\n\n\t\t\t\t\t\tprint(\"\\nYou: \", end='')\n\t\t\t\t\t\tself.displayed_you = True\n\n\t\t\t\t\t#: Remove the processed message\n\t\t\t\t\tself.messages.remove(msg)", "def messages_update(self, ht):\n for i in ht.extract_messages().iteritems():\n self.temporary(i)", "def clear_messages(self):\n with self.message_lock:\n self.messages = self.messages[self._processed_messages:]\n self._processed_messages = 0", "def messages(self):\n return Session.send_request('messages', {'token': self.token}, Session.FULL_RESPONSE_OR_NONE)", "def outbox():\n with mail.record_messages() as messages:\n yield messages", "def send_messages(self):\n if self.messages:\n messages, self.messages = self.messages, []\n self.mpub(\"events.%s\" % config.pool, messages)", "def insertall_message(self, text):\n return self.insertall([{'logging': text}])", "def characters(self, ch):\n if self.inMessageContent:\n self.message = self.message + ch\n self.messages.append(self.message)", "def messages(self):\n return list(iter(self))" ]
[ "0.6577893", "0.64629614", "0.626432", "0.6199823", "0.61209255", "0.60572773", "0.59363085", "0.57713807", "0.57195944", "0.5715277", "0.5674186", "0.56590617", "0.565153", "0.55473465", "0.55473465", "0.55084664", "0.5491512", "0.5487544", "0.5470719", "0.5464526", "0.545333", "0.542602", "0.54219425", "0.5409838", "0.53992754", "0.53923017", "0.53791136", "0.5375763", "0.53738195", "0.53627026" ]
0.7290832
0